gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54
55 /* Prototypes for local functions */
56
57 static void signals_info (char *, int);
58
59 static void handle_command (char *, int);
60
61 static void sig_print_info (enum target_signal);
62
63 static void sig_print_header (void);
64
65 static void resume_cleanups (void *);
66
67 static int hook_stop_stub (void *);
68
69 static int restore_selected_frame (void *);
70
71 static void build_infrun (void);
72
73 static int follow_fork (void);
74
75 static void set_schedlock_func (char *args, int from_tty,
76 struct cmd_list_element *c);
77
78 static int currently_stepping (struct thread_info *tp);
79
80 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
81 void *data);
82
83 static void xdb_handle_command (char *args, int from_tty);
84
85 static int prepare_to_proceed (int);
86
87 void _initialize_infrun (void);
88
89 void nullify_last_target_wait_ptid (void);
90
91 /* When set, stop the 'step' command if we enter a function which has
92 no line number information. The normal behavior is that we step
93 over such a function. */
94 int step_stop_if_no_debug = 0;
95 static void
96 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
97 struct cmd_list_element *c, const char *value)
98 {
99 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
100 }
101
102 /* In asynchronous mode, but simulating synchronous execution. */
103
104 int sync_execution = 0;
105
106 /* wait_for_inferior and normal_stop use this to notify the user
107 when the inferior stopped in a different thread than it had been
108 running in. */
109
110 static ptid_t previous_inferior_ptid;
111
112 /* Default behavior is to detach newly forked processes (legacy). */
113 int detach_fork = 1;
114
115 int debug_displaced = 0;
116 static void
117 show_debug_displaced (struct ui_file *file, int from_tty,
118 struct cmd_list_element *c, const char *value)
119 {
120 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
121 }
122
123 static int debug_infrun = 0;
124 static void
125 show_debug_infrun (struct ui_file *file, int from_tty,
126 struct cmd_list_element *c, const char *value)
127 {
128 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
129 }
130
131 /* If the program uses ELF-style shared libraries, then calls to
132 functions in shared libraries go through stubs, which live in a
133 table called the PLT (Procedure Linkage Table). The first time the
134 function is called, the stub sends control to the dynamic linker,
135 which looks up the function's real address, patches the stub so
136 that future calls will go directly to the function, and then passes
137 control to the function.
138
139 If we are stepping at the source level, we don't want to see any of
140 this --- we just want to skip over the stub and the dynamic linker.
141 The simple approach is to single-step until control leaves the
142 dynamic linker.
143
144 However, on some systems (e.g., Red Hat's 5.2 distribution) the
145 dynamic linker calls functions in the shared C library, so you
146 can't tell from the PC alone whether the dynamic linker is still
147 running. In this case, we use a step-resume breakpoint to get us
148 past the dynamic linker, as if we were using "next" to step over a
149 function call.
150
151 in_solib_dynsym_resolve_code() says whether we're in the dynamic
152 linker code or not. Normally, this means we single-step. However,
153 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
154 address where we can place a step-resume breakpoint to get past the
155 linker's symbol resolution function.
156
157 in_solib_dynsym_resolve_code() can generally be implemented in a
158 pretty portable way, by comparing the PC against the address ranges
159 of the dynamic linker's sections.
160
161 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
162 it depends on internal details of the dynamic linker. It's usually
163 not too hard to figure out where to put a breakpoint, but it
164 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
165 sanity checking. If it can't figure things out, returning zero and
166 getting the (possibly confusing) stepping behavior is better than
167 signalling an error, which will obscure the change in the
168 inferior's state. */
169
170 /* This function returns TRUE if pc is the address of an instruction
171 that lies within the dynamic linker (such as the event hook, or the
172 dld itself).
173
174 This function must be used only when a dynamic linker event has
175 been caught, and the inferior is being stepped out of the hook, or
176 undefined results are guaranteed. */
177
178 #ifndef SOLIB_IN_DYNAMIC_LINKER
179 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
180 #endif
181
182
183 /* Convert the #defines into values. This is temporary until wfi control
184 flow is completely sorted out. */
185
186 #ifndef CANNOT_STEP_HW_WATCHPOINTS
187 #define CANNOT_STEP_HW_WATCHPOINTS 0
188 #else
189 #undef CANNOT_STEP_HW_WATCHPOINTS
190 #define CANNOT_STEP_HW_WATCHPOINTS 1
191 #endif
192
193 /* Tables of how to react to signals; the user sets them. */
194
195 static unsigned char *signal_stop;
196 static unsigned char *signal_print;
197 static unsigned char *signal_program;
198
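/* For each of the first NSIGS signals marked in the SIGS array, SET_SIGS
   sets and UNSET_SIGS clears the corresponding entry in FLAGS (typically
   one of the tables above).  */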
199 #define SET_SIGS(nsigs,sigs,flags) \
200 do { \
201 int signum = (nsigs); \
202 while (signum-- > 0) \
203 if ((sigs)[signum]) \
204 (flags)[signum] = 1; \
205 } while (0)
206
207 #define UNSET_SIGS(nsigs,sigs,flags) \
208 do { \
209 int signum = (nsigs); \
210 while (signum-- > 0) \
211 if ((sigs)[signum]) \
212 (flags)[signum] = 0; \
213 } while (0)
214
215 /* Value to pass to target_resume() to cause all threads to resume */
216
217 #define RESUME_ALL minus_one_ptid
218
219 /* Command list pointer for the "stop" placeholder. */
220
221 static struct cmd_list_element *stop_command;
222
223 /* Function inferior was in as of last step command. */
224
225 static struct symbol *step_start_function;
226
227 /* Nonzero if we want to give control to the user when we're notified
228 of shared library events by the dynamic linker. */
229 static int stop_on_solib_events;
230 static void
231 show_stop_on_solib_events (struct ui_file *file, int from_tty,
232 struct cmd_list_element *c, const char *value)
233 {
234 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
235 value);
236 }
237
238 /* Nonzero means expecting a trace trap
239 and should stop the inferior and return silently when it happens. */
240
241 int stop_after_trap;
242
243 /* Save register contents here when executing a "finish" command or when
244 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
245 Thus this contains the return value from the called function (assuming
246 values are returned in a register). */
247
248 struct regcache *stop_registers;
249
250 /* Nonzero after stop if current stack frame should be printed. */
251
252 static int stop_print_frame;
253
254 /* This is a cached copy of the pid/waitstatus of the last event
255 returned by target_wait()/deprecated_target_wait_hook(). This
256 information is returned by get_last_target_status(). */
257 static ptid_t target_last_wait_ptid;
258 static struct target_waitstatus target_last_waitstatus;
259
260 static void context_switch (ptid_t ptid);
261
262 void init_thread_stepping_state (struct thread_info *tss);
263
264 void init_infwait_state (void);
265
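/* Enum strings for the "set|show follow-fork-mode" command.  */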
266 static const char follow_fork_mode_child[] = "child";
267 static const char follow_fork_mode_parent[] = "parent";
268
269 static const char *follow_fork_mode_kind_names[] = {
270 follow_fork_mode_child,
271 follow_fork_mode_parent,
272 NULL
273 };
274
275 static const char *follow_fork_mode_string = follow_fork_mode_parent;
276 static void
277 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
278 struct cmd_list_element *c, const char *value)
279 {
280 fprintf_filtered (file, _("\
281 Debugger response to a program call of fork or vfork is \"%s\".\n"),
282 value);
283 }
284 \f
285
286 /* Tell the target to follow the fork we're stopped at. Returns true
287 if the inferior should be resumed; false, if the target for some
288 reason decided it's best not to resume. */
289
290 static int
291 follow_fork (void)
292 {
293 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
294 int should_resume = 1;
295 struct thread_info *tp;
296
297 /* Copy user stepping state to the new inferior thread. FIXME: the
298 followed fork child thread should have a copy of most of the
299 parent thread structure's run control related fields, not just these.
300 Initialized to avoid "may be used uninitialized" warnings from gcc. */
301 struct breakpoint *step_resume_breakpoint = NULL;
302 CORE_ADDR step_range_start = 0;
303 CORE_ADDR step_range_end = 0;
304 struct frame_id step_frame_id = { 0 };
305
306 if (!non_stop)
307 {
308 ptid_t wait_ptid;
309 struct target_waitstatus wait_status;
310
311 /* Get the last target status returned by target_wait(). */
312 get_last_target_status (&wait_ptid, &wait_status);
313
314 /* If not stopped at a fork event, then there's nothing else to
315 do. */
316 if (wait_status.kind != TARGET_WAITKIND_FORKED
317 && wait_status.kind != TARGET_WAITKIND_VFORKED)
318 return 1;
319
320 /* Check if we switched over from WAIT_PTID, since the event was
321 reported. */
322 if (!ptid_equal (wait_ptid, minus_one_ptid)
323 && !ptid_equal (inferior_ptid, wait_ptid))
324 {
325 /* We did. Switch back to WAIT_PTID thread, to tell the
326 target to follow it (in either direction). We'll
327 afterwards refuse to resume, and inform the user what
328 happened. */
329 switch_to_thread (wait_ptid);
330 should_resume = 0;
331 }
332 }
333
334 tp = inferior_thread ();
335
336 /* If there were any forks/vforks that were caught and are now to be
337 followed, then do so now. */
338 switch (tp->pending_follow.kind)
339 {
340 case TARGET_WAITKIND_FORKED:
341 case TARGET_WAITKIND_VFORKED:
342 {
343 ptid_t parent, child;
344
345 /* If the user did a next/step, etc, over a fork call,
346 preserve the stepping state in the fork child. */
347 if (follow_child && should_resume)
348 {
349 step_resume_breakpoint
350 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
351 step_range_start = tp->step_range_start;
352 step_range_end = tp->step_range_end;
353 step_frame_id = tp->step_frame_id;
354
355 /* For now, delete the parent's sr breakpoint, otherwise,
356 parent/child sr breakpoints are considered duplicates,
357 and the child version will not be installed. Remove
358 this when the breakpoints module becomes aware of
359 inferiors and address spaces. */
360 delete_step_resume_breakpoint (tp);
361 tp->step_range_start = 0;
362 tp->step_range_end = 0;
363 tp->step_frame_id = null_frame_id;
364 }
365
366 parent = inferior_ptid;
367 child = tp->pending_follow.value.related_pid;
368
369 /* Tell the target to do whatever is necessary to follow
370 either parent or child. */
371 if (target_follow_fork (follow_child))
372 {
373 /* Target refused to follow, or there's some other reason
374 we shouldn't resume. */
375 should_resume = 0;
376 }
377 else
378 {
379 /* This pending follow fork event is now handled, one way
380 or another. The previous selected thread may be gone
381 from the lists by now, but if it is still around, we need
382 to clear the pending follow request. */
383 tp = find_thread_ptid (parent);
384 if (tp)
385 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
386
387 /* This makes sure we don't try to apply the "Switched
388 over from WAIT_PTID" logic above. */
389 nullify_last_target_wait_ptid ();
390
391 /* If we followed the child, switch to it... */
392 if (follow_child)
393 {
394 switch_to_thread (child);
395
396 /* ... and preserve the stepping state, in case the
397 user was stepping over the fork call. */
398 if (should_resume)
399 {
400 tp = inferior_thread ();
401 tp->step_resume_breakpoint = step_resume_breakpoint;
402 tp->step_range_start = step_range_start;
403 tp->step_range_end = step_range_end;
404 tp->step_frame_id = step_frame_id;
405 }
406 else
407 {
408 /* If we get here, it was because we're trying to
409 resume from a fork catchpoint, but, the user
410 has switched threads away from the thread that
411 forked. In that case, the resume command
412 issued is most likely not applicable to the
413 child, so just warn, and refuse to resume. */
414 warning (_("\
415 Not resuming: switched threads before following fork child.\n"));
416 }
417
418 /* Reset breakpoints in the child as appropriate. */
419 follow_inferior_reset_breakpoints ();
420 }
421 else
422 switch_to_thread (parent);
423 }
424 }
425 break;
426 case TARGET_WAITKIND_SPURIOUS:
427 /* Nothing to follow. */
428 break;
429 default:
430 internal_error (__FILE__, __LINE__,
431 "Unexpected pending_follow.kind %d\n",
432 tp->pending_follow.kind);
433 break;
434 }
435
436 return should_resume;
437 }
438
439 void
440 follow_inferior_reset_breakpoints (void)
441 {
442 struct thread_info *tp = inferior_thread ();
443
444 /* Was there a step_resume breakpoint? (There was if the user
445 did a "next" at the fork() call.) If so, explicitly reset its
446 thread number.
447
448 step_resumes are a form of bp that are made to be per-thread.
449 Since we created the step_resume bp when the parent process
450 was being debugged, and now are switching to the child process,
451 from the breakpoint package's viewpoint, that's a switch of
452 "threads". We must update the bp's notion of which thread
453 it is for, or it'll be ignored when it triggers. */
454
455 if (tp->step_resume_breakpoint)
456 breakpoint_re_set_thread (tp->step_resume_breakpoint);
457
458 /* Reinsert all breakpoints in the child. The user may have set
459 breakpoints after catching the fork, in which case those
460 were never set in the child, but only in the parent. This makes
461 sure the inserted breakpoints match the breakpoint list. */
462
463 breakpoint_re_set ();
464 insert_breakpoints ();
465 }
466
467 /* The child has exited or execed: resume threads of the parent the
468 user wanted to be executing. */
469
470 static int
471 proceed_after_vfork_done (struct thread_info *thread,
472 void *arg)
473 {
474 int pid = * (int *) arg;
475
476 if (ptid_get_pid (thread->ptid) == pid
477 && is_running (thread->ptid)
478 && !is_executing (thread->ptid)
479 && !thread->stop_requested
480 && thread->stop_signal == TARGET_SIGNAL_0)
481 {
482 if (debug_infrun)
483 fprintf_unfiltered (gdb_stdlog,
484 "infrun: resuming vfork parent thread %s\n",
485 target_pid_to_str (thread->ptid));
486
487 switch_to_thread (thread->ptid);
488 clear_proceed_status ();
489 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
490 }
491
492 return 0;
493 }
494
495 /* Called whenever we notice an exec or exit event, to handle
496 detaching or resuming a vfork parent. */
497
498 static void
499 handle_vfork_child_exec_or_exit (int exec)
500 {
501 struct inferior *inf = current_inferior ();
502
503 if (inf->vfork_parent)
504 {
505 int resume_parent = -1;
506
507 /* This exec or exit marks the end of the shared memory region
508 between the parent and the child. If the user wanted to
509 detach from the parent, now is the time. */
510
511 if (inf->vfork_parent->pending_detach)
512 {
513 struct thread_info *tp;
514 struct cleanup *old_chain;
515 struct program_space *pspace;
516 struct address_space *aspace;
517
518 /* follow-fork child, detach-on-fork on */
519
520 old_chain = make_cleanup_restore_current_thread ();
521
522 /* We're letting go of the parent. */
523 tp = any_live_thread_of_process (inf->vfork_parent->pid);
524 switch_to_thread (tp->ptid);
525
526 /* We're about to detach from the parent, which implicitly
527 removes breakpoints from its address space. There's a
528 catch here: we want to reuse the spaces for the child,
529 but, parent/child are still sharing the pspace at this
530 point, although the exec in reality makes the kernel give
531 the child a fresh set of new pages. The problem here is
532 that the breakpoints module, being unaware of this, would
533 likely choose the child process to write to the parent
534 address space. Swapping the child temporarily away from
535 the spaces has the desired effect. Yes, this is "sort
536 of" a hack. */
537
538 pspace = inf->pspace;
539 aspace = inf->aspace;
540 inf->aspace = NULL;
541 inf->pspace = NULL;
542
543 if (debug_infrun || info_verbose)
544 {
545 target_terminal_ours ();
546
547 if (exec)
548 fprintf_filtered (gdb_stdlog,
549 "Detaching vfork parent process %d after child exec.\n",
550 inf->vfork_parent->pid);
551 else
552 fprintf_filtered (gdb_stdlog,
553 "Detaching vfork parent process %d after child exit.\n",
554 inf->vfork_parent->pid);
555 }
556
557 target_detach (NULL, 0);
558
559 /* Put it back. */
560 inf->pspace = pspace;
561 inf->aspace = aspace;
562
563 do_cleanups (old_chain);
564 }
565 else if (exec)
566 {
567 /* We're staying attached to the parent, so, really give the
568 child a new address space. */
569 inf->pspace = add_program_space (maybe_new_address_space ());
570 inf->aspace = inf->pspace->aspace;
571 inf->removable = 1;
572 set_current_program_space (inf->pspace);
573
574 resume_parent = inf->vfork_parent->pid;
575
576 /* Break the bonds. */
577 inf->vfork_parent->vfork_child = NULL;
578 }
579 else
580 {
581 struct cleanup *old_chain;
582 struct program_space *pspace;
583
584 /* If this is a vfork child exiting, then the pspace and
585 aspaces were shared with the parent. Since we're
586 reporting the process exit, we'll be mourning all that is
587 found in the address space, and switching to null_ptid,
588 preparing to start a new inferior. But, since we don't
589 want to clobber the parent's address/program spaces, we
590 go ahead and create a new one for this exiting
591 inferior. */
592
593 /* Switch to null_ptid, so that clone_program_space doesn't want
594 to read the selected frame of a dead process. */
595 old_chain = save_inferior_ptid ();
596 inferior_ptid = null_ptid;
597
598 /* This inferior is dead, so avoid giving the breakpoints
599 module the option to write through to it (cloning a
600 program space resets breakpoints). */
601 inf->aspace = NULL;
602 inf->pspace = NULL;
603 pspace = add_program_space (maybe_new_address_space ());
604 set_current_program_space (pspace);
605 inf->removable = 1;
606 clone_program_space (pspace, inf->vfork_parent->pspace);
607 inf->pspace = pspace;
608 inf->aspace = pspace->aspace;
609
610 /* Put back inferior_ptid. We'll continue mourning this
611 inferior. */
612 do_cleanups (old_chain);
613
614 resume_parent = inf->vfork_parent->pid;
615 /* Break the bonds. */
616 inf->vfork_parent->vfork_child = NULL;
617 }
618
619 inf->vfork_parent = NULL;
620
621 gdb_assert (current_program_space == inf->pspace);
622
623 if (non_stop && resume_parent != -1)
624 {
625 /* If the user wanted the parent to be running, let it go
626 free now. */
627 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
628
629 if (debug_infrun)
630 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
631 resume_parent);
632
633 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
634
635 do_cleanups (old_chain);
636 }
637 }
638 }
639
640 /* Enum strings for "set|show follow-exec-mode". */
641
642 static const char follow_exec_mode_new[] = "new";
643 static const char follow_exec_mode_same[] = "same";
644 static const char *follow_exec_mode_names[] =
645 {
646 follow_exec_mode_new,
647 follow_exec_mode_same,
648 NULL,
649 };
650
651 static const char *follow_exec_mode_string = follow_exec_mode_same;
652 static void
653 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
654 struct cmd_list_element *c, const char *value)
655 {
656 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
657 }
658
659 /* EXECD_PATHNAME is assumed to be non-NULL. */
660
661 static void
662 follow_exec (ptid_t pid, char *execd_pathname)
663 {
664 struct target_ops *tgt;
665 struct thread_info *th = inferior_thread ();
666 struct inferior *inf = current_inferior ();
667
668 /* This is an exec event that we actually wish to pay attention to.
669 Refresh our symbol table to the newly exec'd program, remove any
670 momentary bp's, etc.
671
672 If there are breakpoints, they aren't really inserted now,
673 since the exec() transformed our inferior into a fresh set
674 of instructions.
675
676 We want to preserve symbolic breakpoints on the list, since
677 we have hopes that they can be reset after the new a.out's
678 symbol table is read.
679
680 However, any "raw" breakpoints must be removed from the list
681 (e.g., the solib bp's), since their address is probably invalid
682 now.
683
684 And, we DON'T want to call delete_breakpoints() here, since
685 that may write the bp's "shadow contents" (the instruction
686 value that was overwritten with a TRAP instruction). Since
687 we now have a new a.out, those shadow contents aren't valid. */
688
689 mark_breakpoints_out ();
690
691 update_breakpoints_after_exec ();
692
693 /* If there was one, it's gone now. We cannot truly step-to-next
694 statement through an exec(). */
695 th->step_resume_breakpoint = NULL;
696 th->step_range_start = 0;
697 th->step_range_end = 0;
698
699 /* The target reports the exec event to the main thread, even if
700 some other thread does the exec, and even if the main thread was
701 already stopped --- if debugging in non-stop mode, it's possible
702 the user had the main thread held stopped in the previous image
703 --- release it now. This is the same behavior as step-over-exec
704 with scheduler-locking on in all-stop mode. */
705 th->stop_requested = 0;
706
707 /* What is this a.out's name? */
708 printf_unfiltered (_("%s is executing new program: %s\n"),
709 target_pid_to_str (inferior_ptid),
710 execd_pathname);
711
712 /* We've followed the inferior through an exec. Therefore, the
713 inferior has essentially been killed & reborn. */
714
715 gdb_flush (gdb_stdout);
716
717 breakpoint_init_inferior (inf_execd);
718
719 if (gdb_sysroot && *gdb_sysroot)
720 {
721 char *name = alloca (strlen (gdb_sysroot)
722 + strlen (execd_pathname)
723 + 1);
724 strcpy (name, gdb_sysroot);
725 strcat (name, execd_pathname);
726 execd_pathname = name;
727 }
728
729 /* Reset the shared library package. This ensures that we get a
730 shlib event when the child reaches "_start", at which point the
731 dld will have had a chance to initialize the child. */
732 /* Also, loading a symbol file below may trigger symbol lookups, and
733 we don't want those to be satisfied by the libraries of the
734 previous incarnation of this process. */
735 no_shared_libraries (NULL, 0);
736
737 if (follow_exec_mode_string == follow_exec_mode_new)
738 {
739 struct program_space *pspace;
740 struct inferior *new_inf;
741
742 /* The user wants to keep the old inferior and program spaces
743 around. Create a new fresh one, and switch to it. */
744
745 inf = add_inferior (current_inferior ()->pid);
746 pspace = add_program_space (maybe_new_address_space ());
747 inf->pspace = pspace;
748 inf->aspace = pspace->aspace;
749
750 exit_inferior_num_silent (current_inferior ()->num);
751
752 set_current_inferior (inf);
753 set_current_program_space (pspace);
754 }
755
756 gdb_assert (current_program_space == inf->pspace);
757
758 /* That a.out is now the one to use. */
759 exec_file_attach (execd_pathname, 0);
760
761 /* Load the main file's symbols. */
762 symbol_file_add_main (execd_pathname, 0);
763
764 #ifdef SOLIB_CREATE_INFERIOR_HOOK
765 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
766 #else
767 solib_create_inferior_hook (0);
768 #endif
769
770 jit_inferior_created_hook ();
771
772 /* Reinsert all breakpoints. (Those which were symbolic have
773 been reset to the proper address in the new a.out, thanks
774 to symbol_file_command...) */
775 insert_breakpoints ();
776
777 /* The next resume of this inferior should bring it to the shlib
778 startup breakpoints. (If the user had also set bp's on
779 "main" from the old (parent) process, then they'll auto-
780 matically get reset there in the new process.) */
781 }
782
783 /* Non-zero if we are just simulating a single-step. This is needed
784 because we cannot remove the breakpoints in the inferior process
785 until after the `wait' in `wait_for_inferior'. */
786 static int singlestep_breakpoints_inserted_p = 0;
787
788 /* The thread we inserted single-step breakpoints for. */
789 static ptid_t singlestep_ptid;
790
791 /* PC when we started this single-step. */
792 static CORE_ADDR singlestep_pc;
793
794 /* If another thread hit the singlestep breakpoint, we save the original
795 thread here so that we can resume single-stepping it later. */
796 static ptid_t saved_singlestep_ptid;
797 static int stepping_past_singlestep_breakpoint;
798
799 /* If not equal to null_ptid, this means that after stepping over a breakpoint
800 is finished, we need to switch to deferred_step_ptid, and step it.
801
802 The use case is when one thread has hit a breakpoint, and then the user
803 has switched to another thread and issued 'step'. We need to step over
804 the breakpoint in the thread which hit the breakpoint, but then continue
805 stepping the thread the user has selected. */
806 static ptid_t deferred_step_ptid;
807 \f
808 /* Displaced stepping. */
809
810 /* In non-stop debugging mode, we must take special care to manage
811 breakpoints properly; in particular, the traditional strategy for
812 stepping a thread past a breakpoint it has hit is unsuitable.
813 'Displaced stepping' is a tactic for stepping one thread past a
814 breakpoint it has hit while ensuring that other threads running
815 concurrently will hit the breakpoint as they should.
816
817 The traditional way to step a thread T off a breakpoint in a
818 multi-threaded program in all-stop mode is as follows:
819
820 a0) Initially, all threads are stopped, and breakpoints are not
821 inserted.
822 a1) We single-step T, leaving breakpoints uninserted.
823 a2) We insert breakpoints, and resume all threads.
824
825 In non-stop debugging, however, this strategy is unsuitable: we
826 don't want to have to stop all threads in the system in order to
827 continue or step T past a breakpoint. Instead, we use displaced
828 stepping:
829
830 n0) Initially, T is stopped, other threads are running, and
831 breakpoints are inserted.
832 n1) We copy the instruction "under" the breakpoint to a separate
833 location, outside the main code stream, making any adjustments
834 to the instruction, register, and memory state as directed by
835 T's architecture.
836 n2) We single-step T over the instruction at its new location.
837 n3) We adjust the resulting register and memory state as directed
838 by T's architecture. This includes resetting T's PC to point
839 back into the main instruction stream.
840 n4) We resume T.
841
842 This approach depends on the following gdbarch methods:
843
844 - gdbarch_max_insn_length and gdbarch_displaced_step_location
845 indicate where to copy the instruction, and how much space must
846 be reserved there. We use these in step n1.
847
848 - gdbarch_displaced_step_copy_insn copies an instruction to a new
849 address, and makes any necessary adjustments to the instruction,
850 register contents, and memory. We use this in step n1.
851
852 - gdbarch_displaced_step_fixup adjusts registers and memory after
853 we have successfully single-stepped the instruction, to yield the
854 same effect the instruction would have had if we had executed it
855 at its original address. We use this in step n3.
856
857 - gdbarch_displaced_step_free_closure provides cleanup.
858
859 The gdbarch_displaced_step_copy_insn and
860 gdbarch_displaced_step_fixup functions must be written so that
861 copying an instruction with gdbarch_displaced_step_copy_insn,
862 single-stepping across the copied instruction, and then applying
863 gdbarch_displaced_step_fixup should have the same effects on the
864 thread's memory and registers as stepping the instruction in place
865 would have. Exactly which responsibilities fall to the copy and
866 which fall to the fixup is up to the author of those functions.
867
868 See the comments in gdbarch.sh for details.
869
870 Note that displaced stepping and software single-step cannot
871 currently be used in combination, although with some care I think
872 they could be made to. Software single-step works by placing
873 breakpoints on all possible subsequent instructions; if the
874 displaced instruction is a PC-relative jump, those breakpoints
875 could fall in very strange places --- on pages that aren't
876 executable, or at addresses that are not proper instruction
877 boundaries. (We do generally let other threads run while we wait
878 to hit the software single-step breakpoint, and they might
879 encounter such a corrupted instruction.) One way to work around
880 this would be to have gdbarch_displaced_step_copy_insn fully
881 simulate the effect of PC-relative instructions (and return NULL)
882 on architectures that use software single-stepping.
883
884 In non-stop mode, we can have independent and simultaneous step
885 requests, so more than one thread may need to simultaneously step
886 over a breakpoint. The current implementation assumes there is
887 only one scratch space per process. In this case, we have to
888 serialize access to the scratch space. If thread A wants to step
889 over a breakpoint, but we are currently waiting for some other
890 thread to complete a displaced step, we leave thread A stopped and
891 place it in the displaced_step_request_queue. Whenever a displaced
892 step finishes, we pick the next thread in the queue and start a new
893 displaced step operation on it. See displaced_step_prepare and
894 displaced_step_fixup for details. */
895
896 /* If this is not null_ptid, this is the thread carrying out a
897 displaced single-step. This thread's state will require fixing up
898 once it has completed its step. */
899 static ptid_t displaced_step_ptid;
900
901 struct displaced_step_request
902 {
903 ptid_t ptid;
904 struct displaced_step_request *next;
905 };
906
907 /* A queue of pending displaced stepping requests. */
908 struct displaced_step_request *displaced_step_request_queue;
909
910 /* The architecture the thread had when we stepped it. */
911 static struct gdbarch *displaced_step_gdbarch;
912
913 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
914 for post-step cleanup. */
915 static struct displaced_step_closure *displaced_step_closure;
916
917 /* The address of the original instruction, and the copy we made. */
918 static CORE_ADDR displaced_step_original, displaced_step_copy;
919
920 /* Saved contents of copy area. */
921 static gdb_byte *displaced_step_saved_copy;
922
923 /* Enum strings for "set|show displaced-stepping". */
924
925 static const char can_use_displaced_stepping_auto[] = "auto";
926 static const char can_use_displaced_stepping_on[] = "on";
927 static const char can_use_displaced_stepping_off[] = "off";
928 static const char *can_use_displaced_stepping_enum[] =
929 {
930 can_use_displaced_stepping_auto,
931 can_use_displaced_stepping_on,
932 can_use_displaced_stepping_off,
933 NULL,
934 };
935
936 /* If ON, and the architecture supports it, GDB will use displaced
937 stepping to step over breakpoints. If OFF, or if the architecture
938 doesn't support it, GDB will instead use the traditional
939 hold-and-step approach. If AUTO (which is the default), GDB will
940 decide which technique to use to step over breakpoints depending on
941 which of all-stop or non-stop mode is active --- displaced stepping
942 in non-stop mode; hold-and-step in all-stop mode. */
943
944 static const char *can_use_displaced_stepping =
945 can_use_displaced_stepping_auto;
946
947 static void
948 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
949 struct cmd_list_element *c,
950 const char *value)
951 {
952 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
953 fprintf_filtered (file, _("\
954 Debugger's willingness to use displaced stepping to step over \
955 breakpoints is %s (currently %s).\n"),
956 value, non_stop ? "on" : "off");
957 else
958 fprintf_filtered (file, _("\
959 Debugger's willingness to use displaced stepping to step over \
960 breakpoints is %s.\n"), value);
961 }
962
963 /* Return non-zero if displaced stepping can/should be used to step
964 over breakpoints. */
965
966 static int
967 use_displaced_stepping (struct gdbarch *gdbarch)
968 {
969 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
970 && non_stop)
971 || can_use_displaced_stepping == can_use_displaced_stepping_on)
972 && gdbarch_displaced_step_copy_insn_p (gdbarch)
973 && !RECORD_IS_USED);
974 }
975
976 /* Clean out any stray displaced stepping state. */
977 static void
978 displaced_step_clear (void)
979 {
980 /* Indicate that there is no cleanup pending. */
981 displaced_step_ptid = null_ptid;
982
983 if (displaced_step_closure)
984 {
985 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
986 displaced_step_closure);
987 displaced_step_closure = NULL;
988 }
989 }
990
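/* Cleanup wrapper around displaced_step_clear, suitable for registering
   with make_cleanup.  */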
991 static void
992 displaced_step_clear_cleanup (void *ignore)
993 {
994 displaced_step_clear ();
995 }
996
997 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
998 void
999 displaced_step_dump_bytes (struct ui_file *file,
1000 const gdb_byte *buf,
1001 size_t len)
1002 {
1003 int i;
1004
1005 for (i = 0; i < len; i++)
1006 fprintf_unfiltered (file, "%02x ", buf[i]);
1007 fputs_unfiltered ("\n", file);
1008 }
1009
1010 /* Prepare to single-step, using displaced stepping.
1011
1012 Note that we cannot use displaced stepping when we have a signal to
1013 deliver. If we have a signal to deliver and an instruction to step
1014 over, then after the step, there will be no indication from the
1015 target whether the thread entered a signal handler or ignored the
1016 signal and stepped over the instruction successfully --- both cases
1017 result in a simple SIGTRAP. In the first case we mustn't do a
1018 fixup, and in the second case we must --- but we can't tell which.
1019 Comments in the code for 'random signals' in handle_inferior_event
1020 explain how we handle this case instead.
1021
1022 Returns 1 if preparing was successful -- this thread is going to be
1023 stepped now; or 0 if displaced stepping this thread got queued. */
1024 static int
1025 displaced_step_prepare (ptid_t ptid)
1026 {
1027 struct cleanup *old_cleanups, *ignore_cleanups;
1028 struct regcache *regcache = get_thread_regcache (ptid);
1029 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1030 CORE_ADDR original, copy;
1031 ULONGEST len;
1032 struct displaced_step_closure *closure;
1033
1034 /* We should never reach this function if the architecture does not
1035 support displaced stepping. */
1036 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1037
1038 /* For the first cut, we're displaced stepping one thread at a
1039 time. */
1040
1041 if (!ptid_equal (displaced_step_ptid, null_ptid))
1042 {
1043 /* Already waiting for a displaced step to finish. Defer this
1044 request and place in queue. */
1045 struct displaced_step_request *req, *new_req;
1046
1047 if (debug_displaced)
1048 fprintf_unfiltered (gdb_stdlog,
1049 "displaced: defering step of %s\n",
1050 target_pid_to_str (ptid));
1051
1052 new_req = xmalloc (sizeof (*new_req));
1053 new_req->ptid = ptid;
1054 new_req->next = NULL;
1055
1056 if (displaced_step_request_queue)
1057 {
1058 for (req = displaced_step_request_queue;
1059 req && req->next;
1060 req = req->next)
1061 ;
1062 req->next = new_req;
1063 }
1064 else
1065 displaced_step_request_queue = new_req;
1066
1067 return 0;
1068 }
1069 else
1070 {
1071 if (debug_displaced)
1072 fprintf_unfiltered (gdb_stdlog,
1073 "displaced: stepping %s now\n",
1074 target_pid_to_str (ptid));
1075 }
1076
1077 displaced_step_clear ();
1078
1079 old_cleanups = save_inferior_ptid ();
1080 inferior_ptid = ptid;
1081
1082 original = regcache_read_pc (regcache);
1083
1084 copy = gdbarch_displaced_step_location (gdbarch);
1085 len = gdbarch_max_insn_length (gdbarch);
1086
1087 /* Save the original contents of the copy area. */
1088 displaced_step_saved_copy = xmalloc (len);
1089 ignore_cleanups = make_cleanup (free_current_contents,
1090 &displaced_step_saved_copy);
1091 read_memory (copy, displaced_step_saved_copy, len);
1092 if (debug_displaced)
1093 {
1094 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1095 paddress (gdbarch, copy));
1096 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
1097 }
1098
1099 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1100 original, copy, regcache);
1101
1102 /* We don't support the fully-simulated case at present. */
1103 gdb_assert (closure);
1104
1105 /* Save the information we need to fix things up if the step
1106 succeeds. */
1107 displaced_step_ptid = ptid;
1108 displaced_step_gdbarch = gdbarch;
1109 displaced_step_closure = closure;
1110 displaced_step_original = original;
1111 displaced_step_copy = copy;
1112
1113 make_cleanup (displaced_step_clear_cleanup, 0);
1114
1115 /* Resume execution at the copy. */
1116 regcache_write_pc (regcache, copy);
1117
1118 discard_cleanups (ignore_cleanups);
1119
1120 do_cleanups (old_cleanups);
1121
1122 if (debug_displaced)
1123 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1124 paddress (gdbarch, copy));
1125
1126 return 1;
1127 }
1128
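/* Write LEN bytes of MYADDR to inferior memory at MEMADDR, temporarily
   switching inferior_ptid to PTID for the duration of the write.  */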
1129 static void
1130 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1131 {
1132 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1133 inferior_ptid = ptid;
1134 write_memory (memaddr, myaddr, len);
1135 do_cleanups (ptid_cleanup);
1136 }
1137
1138 static void
1139 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1140 {
1141 struct cleanup *old_cleanups;
1142
1143 /* Was this event for the pid we displaced? */
1144 if (ptid_equal (displaced_step_ptid, null_ptid)
1145 || ! ptid_equal (displaced_step_ptid, event_ptid))
1146 return;
1147
1148 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
1149
1150 /* Restore the contents of the copy area. */
1151 {
1152 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
1153 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
1154 displaced_step_saved_copy, len);
1155 if (debug_displaced)
1156 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1157 paddress (displaced_step_gdbarch,
1158 displaced_step_copy));
1159 }
1160
1161 /* Did the instruction complete successfully? */
1162 if (signal == TARGET_SIGNAL_TRAP)
1163 {
1164 /* Fix up the resulting state. */
1165 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
1166 displaced_step_closure,
1167 displaced_step_original,
1168 displaced_step_copy,
1169 get_thread_regcache (displaced_step_ptid));
1170 }
1171 else
1172 {
1173 /* Since the instruction didn't complete, all we can do is
1174 relocate the PC. */
1175 struct regcache *regcache = get_thread_regcache (event_ptid);
1176 CORE_ADDR pc = regcache_read_pc (regcache);
1177 pc = displaced_step_original + (pc - displaced_step_copy);
1178 regcache_write_pc (regcache, pc);
1179 }
1180
1181 do_cleanups (old_cleanups);
1182
1183 displaced_step_ptid = null_ptid;
1184
1185 /* Are there any pending displaced stepping requests? If so, run
1186 one now. */
1187 while (displaced_step_request_queue)
1188 {
1189 struct displaced_step_request *head;
1190 ptid_t ptid;
1191 struct regcache *regcache;
1192 struct gdbarch *gdbarch;
1193 CORE_ADDR actual_pc;
1194 struct address_space *aspace;
1195
1196 head = displaced_step_request_queue;
1197 ptid = head->ptid;
1198 displaced_step_request_queue = head->next;
1199 xfree (head);
1200
1201 context_switch (ptid);
1202
1203 regcache = get_thread_regcache (ptid);
1204 actual_pc = regcache_read_pc (regcache);
1205 aspace = get_regcache_aspace (regcache);
1206
1207 if (breakpoint_here_p (aspace, actual_pc))
1208 {
1209 if (debug_displaced)
1210 fprintf_unfiltered (gdb_stdlog,
1211 "displaced: stepping queued %s now\n",
1212 target_pid_to_str (ptid));
1213
1214 displaced_step_prepare (ptid);
1215
1216 gdbarch = get_regcache_arch (regcache);
1217
1218 if (debug_displaced)
1219 {
1220 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1221 gdb_byte buf[4];
1222
1223 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1224 paddress (gdbarch, actual_pc));
1225 read_memory (actual_pc, buf, sizeof (buf));
1226 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1227 }
1228
1229 if (gdbarch_displaced_step_hw_singlestep
1230 (gdbarch, displaced_step_closure))
1231 target_resume (ptid, 1, TARGET_SIGNAL_0);
1232 else
1233 target_resume (ptid, 0, TARGET_SIGNAL_0);
1234
1235 /* Done, we're stepping a thread. */
1236 break;
1237 }
1238 else
1239 {
1240 int step;
1241 struct thread_info *tp = inferior_thread ();
1242
1243 /* The breakpoint we were sitting under has since been
1244 removed. */
1245 tp->trap_expected = 0;
1246
1247 /* Go back to what we were trying to do. */
1248 step = currently_stepping (tp);
1249
1250 if (debug_displaced)
1251 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1252 target_pid_to_str (tp->ptid), step);
1253
1254 target_resume (ptid, step, TARGET_SIGNAL_0);
1255 tp->stop_signal = TARGET_SIGNAL_0;
1256
1257 /* This request was discarded. See if there's any other
1258 thread waiting for its turn. */
1259 }
1260 }
1261 }
1262
1263 /* Update global variables holding ptids to hold NEW_PTID if they were
1264 holding OLD_PTID. */
1265 static void
1266 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1267 {
1268 struct displaced_step_request *it;
1269
1270 if (ptid_equal (inferior_ptid, old_ptid))
1271 inferior_ptid = new_ptid;
1272
1273 if (ptid_equal (singlestep_ptid, old_ptid))
1274 singlestep_ptid = new_ptid;
1275
1276 if (ptid_equal (displaced_step_ptid, old_ptid))
1277 displaced_step_ptid = new_ptid;
1278
1279 if (ptid_equal (deferred_step_ptid, old_ptid))
1280 deferred_step_ptid = new_ptid;
1281
1282 for (it = displaced_step_request_queue; it; it = it->next)
1283 if (ptid_equal (it->ptid, old_ptid))
1284 it->ptid = new_ptid;
1285 }
1286
1287 \f
1288 /* Resuming. */
1289
1290 /* Things to clean up if we QUIT out of resume (). */
1291 static void
1292 resume_cleanups (void *ignore)
1293 {
1294 normal_stop ();
1295 }
1296
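/* Possible values of the scheduler-locking mode; see scheduler_mode and
   show_scheduler_mode below.  */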
1297 static const char schedlock_off[] = "off";
1298 static const char schedlock_on[] = "on";
1299 static const char schedlock_step[] = "step";
1300 static const char *scheduler_enums[] = {
1301 schedlock_off,
1302 schedlock_on,
1303 schedlock_step,
1304 NULL
1305 };
1306 static const char *scheduler_mode = schedlock_off;
1307 static void
1308 show_scheduler_mode (struct ui_file *file, int from_tty,
1309 struct cmd_list_element *c, const char *value)
1310 {
1311 fprintf_filtered (file, _("\
1312 Mode for locking scheduler during execution is \"%s\".\n"),
1313 value);
1314 }
1315
1316 static void
1317 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1318 {
1319 if (!target_can_lock_scheduler)
1320 {
1321 scheduler_mode = schedlock_off;
1322 error (_("Target '%s' cannot support this command."), target_shortname);
1323 }
1324 }
1325
1326 /* True if execution commands resume all threads of all processes by
1327 default; otherwise, resume only threads of the current inferior
1328 process. */
1329 int sched_multi = 0;
1330
1331 /* Try to set up software single stepping over the specified location.
1332 Return 1 if target_resume() should use hardware single step.
1333
1334 GDBARCH the current gdbarch.
1335 PC the location to step over. */
1336
1337 static int
1338 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1339 {
1340 int hw_step = 1;
1341
1342 if (gdbarch_software_single_step_p (gdbarch)
1343 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1344 {
1345 hw_step = 0;
1346 /* Do not pull these breakpoints until after a `wait' in
1347 `wait_for_inferior' */
1348 singlestep_breakpoints_inserted_p = 1;
1349 singlestep_ptid = inferior_ptid;
1350 singlestep_pc = pc;
1351 }
1352 return hw_step;
1353 }
1354
1355 /* Resume the inferior, but allow a QUIT. This is useful if the user
1356 wants to interrupt some lengthy single-stepping operation
1357 (for child processes, the SIGINT goes to the inferior, and so
1358 we get a SIGINT random_signal, but for remote debugging and perhaps
1359 other targets, that's not true).
1360
1361 STEP nonzero if we should step (zero to continue instead).
1362 SIG is the signal to give the inferior (zero for none). */
1363 void
1364 resume (int step, enum target_signal sig)
1365 {
1366 int should_resume = 1;
1367 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1368 struct regcache *regcache = get_current_regcache ();
1369 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1370 struct thread_info *tp = inferior_thread ();
1371 CORE_ADDR pc = regcache_read_pc (regcache);
1372 struct address_space *aspace = get_regcache_aspace (regcache);
1373
1374 QUIT;
1375
1376 if (debug_infrun)
1377 fprintf_unfiltered (gdb_stdlog,
1378 "infrun: resume (step=%d, signal=%d), "
1379 "trap_expected=%d\n",
1380 step, sig, tp->trap_expected);
1381
1382 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1383 over an instruction that causes a page fault without triggering
1384 a hardware watchpoint. The kernel properly notices that it shouldn't
1385 stop, because the hardware watchpoint is not triggered, but it forgets
1386 the step request and continues the program normally.
1387 Work around the problem by removing hardware watchpoints if a step is
1388 requested; GDB will check for a hardware watchpoint trigger after the
1389 step anyway. */
1390 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1391 remove_hw_watchpoints ();
1392
1393
1394 /* Normally, by the time we reach `resume', the breakpoints are either
1395 removed or inserted, as appropriate. The exception is if we're sitting
1396 at a permanent breakpoint; we need to step over it, but permanent
1397 breakpoints can't be removed. So we have to test for it here. */
1398 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1399 {
1400 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1401 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1402 else
1403 error (_("\
1404 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1405 how to step past a permanent breakpoint on this architecture. Try using\n\
1406 a command like `return' or `jump' to continue execution."));
1407 }
1408
1409 /* If enabled, step over breakpoints by executing a copy of the
1410 instruction at a different address.
1411
1412 We can't use displaced stepping when we have a signal to deliver;
1413 the comments for displaced_step_prepare explain why. The
1414 comments in handle_inferior_event for dealing with 'random
1415 signals' explain what we do instead. */
1416 if (use_displaced_stepping (gdbarch)
1417 && (tp->trap_expected
1418 || (step && gdbarch_software_single_step_p (gdbarch)))
1419 && sig == TARGET_SIGNAL_0)
1420 {
1421 if (!displaced_step_prepare (inferior_ptid))
1422 {
1423 /* Got placed in displaced stepping queue. Will be resumed
1424 later when all the currently queued displaced stepping
1425 requests finish. The thread is not executing at this point,
1426 and the call to set_executing will be made later. But we
1427 need to call set_running here, since from the frontend's point of view,
1428 the thread is running. */
1429 set_running (inferior_ptid, 1);
1430 discard_cleanups (old_cleanups);
1431 return;
1432 }
1433
1434 step = gdbarch_displaced_step_hw_singlestep
1435 (gdbarch, displaced_step_closure);
1436 }
1437
1438 /* Do we need to do it the hard way, w/temp breakpoints? */
1439 else if (step)
1440 step = maybe_software_singlestep (gdbarch, pc);
1441
1442 if (should_resume)
1443 {
1444 ptid_t resume_ptid;
1445
1446 /* If STEP is set, it's a request to use hardware stepping
1447 facilities. But in that case, we should never
1448 use singlestep breakpoint. */
1449 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1450
1451 /* Decide the set of threads to ask the target to resume. Start
1452 by assuming everything will be resumed, then narrow the set
1453 by applying increasingly restrictive conditions. */
1454
1455 /* By default, resume all threads of all processes. */
1456 resume_ptid = RESUME_ALL;
1457
1458 /* Maybe resume only all threads of the current process. */
1459 if (!sched_multi && target_supports_multi_process ())
1460 {
1461 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1462 }
1463
1464 /* Maybe resume a single thread after all. */
1465 if (singlestep_breakpoints_inserted_p
1466 && stepping_past_singlestep_breakpoint)
1467 {
1468 /* The situation here is as follows. In thread T1 we wanted to
1469 single-step. Lacking hardware single-stepping we've
1470 set breakpoint at the PC of the next instruction -- call it
1471 P. After resuming, we've hit that breakpoint in thread T2.
1472 Now we've removed original breakpoint, inserted breakpoint
1473 at P+1, and try to step to advance T2 past breakpoint.
1474 We need to step only T2, as if T1 is allowed to freely run,
1475 it can run past P, and if other threads are allowed to run,
1476 they can hit the breakpoint at P+1, and nested hits of single-step
1477 breakpoints are not something we'd want -- that's complicated
1478 to support, and has no value. */
1479 resume_ptid = inferior_ptid;
1480 }
1481 else if ((step || singlestep_breakpoints_inserted_p)
1482 && tp->trap_expected)
1483 {
1484 /* We're allowing a thread to run past a breakpoint it has
1485 hit, by single-stepping the thread with the breakpoint
1486 removed. In which case, we need to single-step only this
1487 thread, and keep others stopped, as they can miss this
1488 breakpoint if allowed to run.
1489
1490 The current code actually removes all breakpoints when
1491 doing this, not just the one being stepped over, so if we
1492 let other threads run, we can actually miss any
1493 breakpoint, not just the one at PC. */
1494 resume_ptid = inferior_ptid;
1495 }
1496 else if (non_stop)
1497 {
1498 /* With non-stop mode on, threads are always handled
1499 individually. */
1500 resume_ptid = inferior_ptid;
1501 }
1502 else if ((scheduler_mode == schedlock_on)
1503 || (scheduler_mode == schedlock_step
1504 && (step || singlestep_breakpoints_inserted_p)))
1505 {
1506 /* User-settable 'scheduler' mode requires solo thread resume. */
1507 resume_ptid = inferior_ptid;
1508 }
1509
1510 if (gdbarch_cannot_step_breakpoint (gdbarch))
1511 {
1512 /* Most targets can step a breakpoint instruction, thus
1513 executing it normally. But if this one cannot, just
1514 continue and we will hit it anyway. */
1515 if (step && breakpoint_inserted_here_p (aspace, pc))
1516 step = 0;
1517 }
1518
1519 if (debug_displaced
1520 && use_displaced_stepping (gdbarch)
1521 && tp->trap_expected)
1522 {
1523 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1524 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1525 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1526 gdb_byte buf[4];
1527
1528 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1529 paddress (resume_gdbarch, actual_pc));
1530 read_memory (actual_pc, buf, sizeof (buf));
1531 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1532 }
1533
1534 /* Install inferior's terminal modes. */
1535 target_terminal_inferior ();
1536
1537 /* Avoid confusing the next resume, if the next stop/resume
1538 happens to apply to another thread. */
1539 tp->stop_signal = TARGET_SIGNAL_0;
1540
1541 target_resume (resume_ptid, step, sig);
1542 }
1543
1544 discard_cleanups (old_cleanups);
1545 }
1546 \f
1547 /* Proceeding. */
1548
1549 /* Clear out all variables saying what to do when the inferior is continued.
1550 First do this, then set the ones you want, then call `proceed'. */
1551
1552 static void
1553 clear_proceed_status_thread (struct thread_info *tp)
1554 {
1555 if (debug_infrun)
1556 fprintf_unfiltered (gdb_stdlog,
1557 "infrun: clear_proceed_status_thread (%s)\n",
1558 target_pid_to_str (tp->ptid));
1559
1560 tp->trap_expected = 0;
1561 tp->step_range_start = 0;
1562 tp->step_range_end = 0;
1563 tp->step_frame_id = null_frame_id;
1564 tp->step_stack_frame_id = null_frame_id;
1565 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1566 tp->stop_requested = 0;
1567
1568 tp->stop_step = 0;
1569
1570 tp->proceed_to_finish = 0;
1571
1572 /* Discard any remaining commands or status from previous stop. */
1573 bpstat_clear (&tp->stop_bpstat);
1574 }
1575
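/* Callback for iterate_over_threads: clear the run-control state of
   thread TP, skipping threads that have already exited.  */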
1576 static int
1577 clear_proceed_status_callback (struct thread_info *tp, void *data)
1578 {
1579 if (is_exited (tp->ptid))
1580 return 0;
1581
1582 clear_proceed_status_thread (tp);
1583 return 0;
1584 }
1585
1586 void
1587 clear_proceed_status (void)
1588 {
1589 if (!non_stop)
1590 {
1591 /* In all-stop mode, delete the per-thread status of all
1592 threads; even if inferior_ptid is null_ptid, there may be
1593 threads on the list. E.g., we may be launching a new
1594 process, while selecting the executable. */
1595 iterate_over_threads (clear_proceed_status_callback, NULL);
1596 }
1597
1598 if (!ptid_equal (inferior_ptid, null_ptid))
1599 {
1600 struct inferior *inferior;
1601
1602 if (non_stop)
1603 {
1604 /* If in non-stop mode, only delete the per-thread status of
1605 the current thread. */
1606 clear_proceed_status_thread (inferior_thread ());
1607 }
1608
1609 inferior = current_inferior ();
1610 inferior->stop_soon = NO_STOP_QUIETLY;
1611 }
1612
1613 stop_after_trap = 0;
1614
1615 observer_notify_about_to_proceed ();
1616
1617 if (stop_registers)
1618 {
1619 regcache_xfree (stop_registers);
1620 stop_registers = NULL;
1621 }
1622 }
1623
1624 /* Check the current thread against the thread that reported the most recent
1625 event. If a step-over is required return TRUE and set the current thread
1626 to the old thread. Otherwise return FALSE.
1627
1628 This should be suitable for any targets that support threads. */
1629
1630 static int
1631 prepare_to_proceed (int step)
1632 {
1633 ptid_t wait_ptid;
1634 struct target_waitstatus wait_status;
1635 int schedlock_enabled;
1636
1637 /* With non-stop mode on, threads are always handled individually. */
1638 gdb_assert (! non_stop);
1639
1640 /* Get the last target status returned by target_wait(). */
1641 get_last_target_status (&wait_ptid, &wait_status);
1642
1643 /* Make sure we were stopped at a breakpoint. */
1644 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1645 || wait_status.value.sig != TARGET_SIGNAL_TRAP)
1646 {
1647 return 0;
1648 }
1649
1650 schedlock_enabled = (scheduler_mode == schedlock_on
1651 || (scheduler_mode == schedlock_step
1652 && step));
1653
1654 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1655 if (schedlock_enabled)
1656 return 0;
1657
1658 /* Don't switch over if we're about to resume a process other
1659 than WAIT_PTID's, and schedule-multiple is off. */
1660 if (!sched_multi
1661 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1662 return 0;
1663
1664 /* We have switched over from the WAIT_PTID thread. */
1665 if (!ptid_equal (wait_ptid, minus_one_ptid)
1666 && !ptid_equal (inferior_ptid, wait_ptid))
1667 {
1668 struct regcache *regcache = get_thread_regcache (wait_ptid);
1669
1670 if (breakpoint_here_p (get_regcache_aspace (regcache),
1671 regcache_read_pc (regcache)))
1672 {
1673 /* If stepping, remember current thread to switch back to. */
1674 if (step)
1675 deferred_step_ptid = inferior_ptid;
1676
1677 /* Switch back to the WAIT_PTID thread. */
1678 switch_to_thread (wait_ptid);
1679
1680 /* We return 1 to indicate that there is a breakpoint here,
1681 so we need to step over it before continuing to avoid
1682 hitting it straight away. */
1683 return 1;
1684 }
1685 }
1686
1687 return 0;
1688 }
1689
1690 /* Basic routine for continuing the program in various fashions.
1691
1692 ADDR is the address to resume at, or -1 to resume where stopped.
1693 SIGGNAL is the signal to give it, or 0 for none,
1694 or -1 to act according to how it stopped.
1695 STEP is nonzero if we should trap after one instruction.
1696 -1 means return after that and print nothing.
1697 You should probably set various step_... variables
1698 before calling here, if you are stepping.
1699
1700 You should call clear_proceed_status before calling proceed. */
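/* As an illustrative sketch (not a call site in this file), the
   common "continue from where we stopped" case boils down to:

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   i.e. resume at the current PC, keep the thread's pending stop
   signal (see the TARGET_SIGNAL_DEFAULT check in the body below),
   and do not single-step. */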
1701
1702 void
1703 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1704 {
1705 struct regcache *regcache;
1706 struct gdbarch *gdbarch;
1707 struct thread_info *tp;
1708 CORE_ADDR pc;
1709 struct address_space *aspace;
1710 int oneproc = 0;
1711
1712 /* If we're stopped at a fork/vfork, follow the branch set by the
1713 "set follow-fork-mode" command; otherwise, we'll just proceed
1714 resuming the current thread. */
1715 if (!follow_fork ())
1716 {
1717 /* The target for some reason decided not to resume. */
1718 normal_stop ();
1719 return;
1720 }
1721
1722 regcache = get_current_regcache ();
1723 gdbarch = get_regcache_arch (regcache);
1724 aspace = get_regcache_aspace (regcache);
1725 pc = regcache_read_pc (regcache);
1726
1727 if (step > 0)
1728 step_start_function = find_pc_function (pc);
1729 if (step < 0)
1730 stop_after_trap = 1;
1731
1732 if (addr == (CORE_ADDR) -1)
1733 {
1734 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1735 && execution_direction != EXEC_REVERSE)
1736 /* There is a breakpoint at the address we will resume at;
1737 step one instruction before inserting breakpoints so that
1738 we do not stop right away (and report a second hit at this
1739 breakpoint).
1740
1741 Note, we don't do this in reverse, because we won't
1742 actually be executing the breakpoint insn anyway.
1743 We'll be (un-)executing the previous instruction. */
1744
1745 oneproc = 1;
1746 else if (gdbarch_single_step_through_delay_p (gdbarch)
1747 && gdbarch_single_step_through_delay (gdbarch,
1748 get_current_frame ()))
1749 /* We stepped onto an instruction that needs to be stepped
1750 again before re-inserting the breakpoint; do so. */
1751 oneproc = 1;
1752 }
1753 else
1754 {
1755 regcache_write_pc (regcache, addr);
1756 }
1757
1758 if (debug_infrun)
1759 fprintf_unfiltered (gdb_stdlog,
1760 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1761 paddress (gdbarch, addr), siggnal, step);
1762
1763 if (non_stop)
1764 /* In non-stop, each thread is handled individually. The context
1765 must already be set to the right thread here. */
1766 ;
1767 else
1768 {
1769 /* In a multi-threaded task we may select another thread and
1770 then continue or step.
1771
1772 But if the old thread was stopped at a breakpoint, it will
1773 immediately cause another breakpoint stop without any
1774 execution (i.e. it will report a breakpoint hit incorrectly).
1775 So we must step over it first.
1776
1777 prepare_to_proceed checks the current thread against the
1778 thread that reported the most recent event. If a step-over
1779 is required it returns TRUE and sets the current thread to
1780 the old thread. */
1781 if (prepare_to_proceed (step))
1782 oneproc = 1;
1783 }
1784
1785 /* prepare_to_proceed may change the current thread. */
1786 tp = inferior_thread ();
1787
1788 if (oneproc)
1789 {
1790 tp->trap_expected = 1;
1791 /* If displaced stepping is enabled, we can step over the
1792 breakpoint without hitting it, so leave all breakpoints
1793 inserted. Otherwise we need to disable all breakpoints, step
1794 one instruction, and then re-add them when that step is
1795 finished. */
1796 if (!use_displaced_stepping (gdbarch))
1797 remove_breakpoints ();
1798 }
1799
1800 /* We can insert breakpoints if we're not trying to step over one,
1801 or if we are stepping over one but we're using displaced stepping
1802 to do so. */
1803 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1804 insert_breakpoints ();
1805
1806 if (!non_stop)
1807 {
1808 /* Pass the last stop signal to the thread we're resuming,
1809 irrespective of whether the current thread is the thread that
1810 got the last event or not. This was historically GDB's
1811 behaviour before keeping a stop_signal per thread. */
1812
1813 struct thread_info *last_thread;
1814 ptid_t last_ptid;
1815 struct target_waitstatus last_status;
1816
1817 get_last_target_status (&last_ptid, &last_status);
1818 if (!ptid_equal (inferior_ptid, last_ptid)
1819 && !ptid_equal (last_ptid, null_ptid)
1820 && !ptid_equal (last_ptid, minus_one_ptid))
1821 {
1822 last_thread = find_thread_ptid (last_ptid);
1823 if (last_thread)
1824 {
1825 tp->stop_signal = last_thread->stop_signal;
1826 last_thread->stop_signal = TARGET_SIGNAL_0;
1827 }
1828 }
1829 }
1830
1831 if (siggnal != TARGET_SIGNAL_DEFAULT)
1832 tp->stop_signal = siggnal;
1833 /* If this signal should not be seen by the program,
1834 give it zero. Used for debugging signals. */
1835 else if (!signal_program[tp->stop_signal])
1836 tp->stop_signal = TARGET_SIGNAL_0;
1837
1838 annotate_starting ();
1839
1840 /* Make sure that output from GDB appears before output from the
1841 inferior. */
1842 gdb_flush (gdb_stdout);
1843
1844 /* Refresh prev_pc value just prior to resuming. This used to be
1845 done in stop_stepping; however, setting prev_pc there did not handle
1846 scenarios such as inferior function calls or returning from
1847 a function via the return command. In those cases, the prev_pc
1848 value was not set properly for subsequent commands. The prev_pc value
1849 is used to initialize the starting line number in the ecs. With an
1850 invalid value, the gdb next command ends up stopping at the position
1851 represented by the next line table entry past our start position.
1852 On platforms that generate one line table entry per line, this
1853 is not a problem. However, on the ia64, the compiler generates
1854 extraneous line table entries that do not increase the line number.
1855 When we issue the gdb next command on the ia64 after an inferior call
1856 or a return command, we often end up a few instructions forward, still
1857 within the original line we started in.
1858
1859 An attempt was made to have init_execution_control_state () refresh
1860 the prev_pc value before calculating the line number. This approach
1861 did not work because on platforms that use ptrace, the pc register
1862 cannot be read unless the inferior is stopped. At that point, we
1863 are not guaranteed the inferior is stopped and so the regcache_read_pc ()
1864 call can fail. Setting the prev_pc value here ensures the value is
1865 updated correctly when the inferior is stopped. */
1866 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1867
1868 /* Fill in with reasonable starting values. */
1869 init_thread_stepping_state (tp);
1870
1871 /* Reset to normal state. */
1872 init_infwait_state ();
1873
1874 /* Resume inferior. */
1875 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1876
1877 /* Wait for it to stop (if not standalone)
1878 and in any case decode why it stopped, and act accordingly. */
1879 /* Do this only if we are not using the event loop, or if the target
1880 does not support asynchronous execution. */
1881 if (!target_can_async_p ())
1882 {
1883 wait_for_inferior (0);
1884 normal_stop ();
1885 }
1886 }
1887 \f
1888
1889 /* Start remote-debugging of a machine over a serial link. */
1890
1891 void
1892 start_remote (int from_tty)
1893 {
1894 struct inferior *inferior;
1895 init_wait_for_inferior ();
1896
1897 inferior = current_inferior ();
1898 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1899
1900 /* Always go on waiting for the target, regardless of the mode. */
1901 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1902 indicate to wait_for_inferior that a target should timeout if
1903 nothing is returned (instead of just blocking). Because of this,
1904 targets expecting an immediate response need to, internally, set
1905 things up so that target_wait() is forced to eventually
1906 time out. */
1907 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1908 indicate to its caller what the state of the target is after
1909 the initial open has been performed. Here we're assuming that
1910 the target has stopped. It should be possible to eventually have
1911 target_open() return to the caller an indication that the target
1912 is currently running and GDB state should be set to the same as
1913 for an async run. */
1914 wait_for_inferior (0);
1915
1916 /* Now that the inferior has stopped, do any bookkeeping like
1917 loading shared libraries. We want to do this before normal_stop,
1918 so that the displayed frame is up to date. */
1919 post_create_inferior (&current_target, from_tty);
1920
1921 normal_stop ();
1922 }
1923
1924 /* Initialize static vars when a new inferior begins. */
1925
1926 void
1927 init_wait_for_inferior (void)
1928 {
1929 /* These are meaningless until the first time through wait_for_inferior. */
1930
1931 breakpoint_init_inferior (inf_starting);
1932
1933 clear_proceed_status ();
1934
1935 stepping_past_singlestep_breakpoint = 0;
1936 deferred_step_ptid = null_ptid;
1937
1938 target_last_wait_ptid = minus_one_ptid;
1939
1940 previous_inferior_ptid = null_ptid;
1941 init_infwait_state ();
1942
1943 displaced_step_clear ();
1944
1945 /* Discard any skipped inlined frames. */
1946 clear_inline_frame_state (minus_one_ptid);
1947 }
1948
1949 \f
1950 /* This enum encodes possible reasons for doing a target_wait, so that
1951 wait_for_inferior can call target_wait in one place. (Ultimately
1952 the call will be moved out of the infinite loop entirely.) */
1953
1954 enum infwait_states
1955 {
1956 infwait_normal_state,
1957 infwait_thread_hop_state,
1958 infwait_step_watch_state,
1959 infwait_nonstep_watch_state
1960 };
1961
1962 /* Why did the inferior stop? Used to print the appropriate messages
1963 to the interface from within handle_inferior_event(). */
1964 enum inferior_stop_reason
1965 {
1966 /* Step, next, nexti, stepi finished. */
1967 END_STEPPING_RANGE,
1968 /* Inferior terminated by signal. */
1969 SIGNAL_EXITED,
1970 /* Inferior exited. */
1971 EXITED,
1972 /* Inferior received signal, and user asked to be notified. */
1973 SIGNAL_RECEIVED,
1974 /* Reverse execution -- target ran out of history info. */
1975 NO_HISTORY
1976 };
1977
1978 /* The PTID we'll do a target_wait on. */
1979 ptid_t waiton_ptid;
1980
1981 /* Current inferior wait state. */
1982 enum infwait_states infwait_state;
1983
1984 /* Data to be passed around while handling an event. This data is
1985 discarded between events. */
1986 struct execution_control_state
1987 {
1988 ptid_t ptid;
1989 /* The thread that got the event, if this was a thread event; NULL
1990 otherwise. */
1991 struct thread_info *event_thread;
1992
1993 struct target_waitstatus ws;
1994 int random_signal;
1995 CORE_ADDR stop_func_start;
1996 CORE_ADDR stop_func_end;
1997 char *stop_func_name;
1998 int new_thread_event;
1999 int wait_some_more;
2000 };
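/* A rough sketch of how an execution_control_state is normally
   driven (condensed from wait_for_inferior and fetch_inferior_event
   below):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (ecs->wait_some_more)
       ... wait for and handle the next event ...

   handle_inferior_event fills in EVENT_THREAD and the remaining
   fields itself; the state is thrown away once WAIT_SOME_MORE is
   left clear. */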
2001
2002 static void init_execution_control_state (struct execution_control_state *ecs);
2003
2004 static void handle_inferior_event (struct execution_control_state *ecs);
2005
2006 static void handle_step_into_function (struct gdbarch *gdbarch,
2007 struct execution_control_state *ecs);
2008 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2009 struct execution_control_state *ecs);
2010 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2011 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2012 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2013 struct symtab_and_line sr_sal,
2014 struct frame_id sr_id);
2015 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2016
2017 static void stop_stepping (struct execution_control_state *ecs);
2018 static void prepare_to_wait (struct execution_control_state *ecs);
2019 static void keep_going (struct execution_control_state *ecs);
2020 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2021 int stop_info);
2022
2023 /* Callback for iterate_over_threads. If the thread is stopped, but
2024 the user/frontend doesn't know about that yet, go through
2025 normal_stop, as if the thread had just stopped now. ARG points at
2026 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2027 ptid_is_pid(PTID) is true, applies to all threads of the process
2028 pointed at by PTID. Otherwise, applies only to the thread pointed
2029 to by PTID. */
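/* For example (illustrative only; 1234 is an arbitrary pid): an ARG
   pointing at minus_one_ptid matches every stopped-but-unreported
   thread, one pointing at pid_to_ptid (1234) matches only the
   threads of process 1234, and a fully qualified ptid matches that
   single thread. See infrun_thread_stop_requested below, which
   drives this callback via iterate_over_threads. */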
2030
2031 static int
2032 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2033 {
2034 ptid_t ptid = * (ptid_t *) arg;
2035
2036 if ((ptid_equal (info->ptid, ptid)
2037 || ptid_equal (minus_one_ptid, ptid)
2038 || (ptid_is_pid (ptid)
2039 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2040 && is_running (info->ptid)
2041 && !is_executing (info->ptid))
2042 {
2043 struct cleanup *old_chain;
2044 struct execution_control_state ecss;
2045 struct execution_control_state *ecs = &ecss;
2046
2047 memset (ecs, 0, sizeof (*ecs));
2048
2049 old_chain = make_cleanup_restore_current_thread ();
2050
2051 switch_to_thread (info->ptid);
2052
2053 /* Go through handle_inferior_event/normal_stop, so we always
2054 have consistent output as if the stop event had been
2055 reported. */
2056 ecs->ptid = info->ptid;
2057 ecs->event_thread = find_thread_ptid (info->ptid);
2058 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2059 ecs->ws.value.sig = TARGET_SIGNAL_0;
2060
2061 handle_inferior_event (ecs);
2062
2063 if (!ecs->wait_some_more)
2064 {
2065 struct thread_info *tp;
2066
2067 normal_stop ();
2068
2069 /* Finish off the continuations. The continuations
2070 themselves are responsible for realizing that the thread
2071 didn't finish what it was supposed to do. */
2072 tp = inferior_thread ();
2073 do_all_intermediate_continuations_thread (tp);
2074 do_all_continuations_thread (tp);
2075 }
2076
2077 do_cleanups (old_chain);
2078 }
2079
2080 return 0;
2081 }
2082
2083 /* This function is attached as a "thread_stop_requested" observer.
2084 Clean up local state that assumed the PTID was to be resumed, and
2085 report the stop to the frontend. */
2086
2087 static void
2088 infrun_thread_stop_requested (ptid_t ptid)
2089 {
2090 struct displaced_step_request *it, *next, *prev = NULL;
2091
2092 /* PTID was requested to stop. Remove it from the displaced
2093 stepping queue, so we don't try to resume it automatically. */
2094 for (it = displaced_step_request_queue; it; it = next)
2095 {
2096 next = it->next;
2097
2098 if (ptid_equal (it->ptid, ptid)
2099 || ptid_equal (minus_one_ptid, ptid)
2100 || (ptid_is_pid (ptid)
2101 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
2102 {
2103 if (displaced_step_request_queue == it)
2104 displaced_step_request_queue = it->next;
2105 else
2106 prev->next = it->next;
2107
2108 xfree (it);
2109 }
2110 else
2111 prev = it;
2112 }
2113
2114 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2115 }
2116
2117 static void
2118 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2119 {
2120 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2121 nullify_last_target_wait_ptid ();
2122 }
2123
2124 /* Callback for iterate_over_threads. */
2125
2126 static int
2127 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2128 {
2129 if (is_exited (info->ptid))
2130 return 0;
2131
2132 delete_step_resume_breakpoint (info);
2133 return 0;
2134 }
2135
2136 /* In all-stop, delete the step resume breakpoint of any thread that
2137 had one. In non-stop, delete the step resume breakpoint of the
2138 thread that just stopped. */
2139
2140 static void
2141 delete_step_thread_step_resume_breakpoint (void)
2142 {
2143 if (!target_has_execution
2144 || ptid_equal (inferior_ptid, null_ptid))
2145 /* If the inferior has exited, we have already deleted the step
2146 resume breakpoints out of GDB's lists. */
2147 return;
2148
2149 if (non_stop)
2150 {
2151 /* If in non-stop mode, only delete the step-resume or
2152 longjmp-resume breakpoint of the thread that just stopped
2153 stepping. */
2154 struct thread_info *tp = inferior_thread ();
2155 delete_step_resume_breakpoint (tp);
2156 }
2157 else
2158 /* In all-stop mode, delete all step-resume and longjmp-resume
2159 breakpoints of any thread that had them. */
2160 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2161 }
2162
2163 /* A cleanup wrapper. */
2164
2165 static void
2166 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2167 {
2168 delete_step_thread_step_resume_breakpoint ();
2169 }
2170
2171 /* Pretty print the results of target_wait, for debugging purposes. */
2172
2173 static void
2174 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2175 const struct target_waitstatus *ws)
2176 {
2177 char *status_string = target_waitstatus_to_string (ws);
2178 struct ui_file *tmp_stream = mem_fileopen ();
2179 char *text;
2180
2181 /* The text is split over several lines because it was getting too long.
2182 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2183 output as a unit; we want only one timestamp printed if debug_timestamp
2184 is set. */
2185
2186 fprintf_unfiltered (tmp_stream,
2187 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2188 if (PIDGET (waiton_ptid) != -1)
2189 fprintf_unfiltered (tmp_stream,
2190 " [%s]", target_pid_to_str (waiton_ptid));
2191 fprintf_unfiltered (tmp_stream, ", status) =\n");
2192 fprintf_unfiltered (tmp_stream,
2193 "infrun: %d [%s],\n",
2194 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2195 fprintf_unfiltered (tmp_stream,
2196 "infrun: %s\n",
2197 status_string);
2198
2199 text = ui_file_xstrdup (tmp_stream, NULL);
2200
2201 /* This uses %s in part to handle %'s in the text, but also to avoid
2202 a gcc error: the format attribute requires a string literal. */
2203 fprintf_unfiltered (gdb_stdlog, "%s", text);
2204
2205 xfree (status_string);
2206 xfree (text);
2207 ui_file_delete (tmp_stream);
2208 }
2209
2210 /* Wait for control to return from inferior to debugger.
2211
2212 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2213 as if they were SIGTRAP signals. This can be useful during
2214 the startup sequence on some targets such as HP/UX, where
2215 we receive an EXEC event instead of the expected SIGTRAP.
2216
2217 If the inferior gets a signal, we may decide to start it up again
2218 instead of returning. That is why there is a loop in this function.
2219 When this function actually returns, it means the inferior
2220 should be left stopped and GDB should read more commands. */
2221
2222 void
2223 wait_for_inferior (int treat_exec_as_sigtrap)
2224 {
2225 struct cleanup *old_cleanups;
2226 struct execution_control_state ecss;
2227 struct execution_control_state *ecs;
2228
2229 if (debug_infrun)
2230 fprintf_unfiltered
2231 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2232 treat_exec_as_sigtrap);
2233
2234 old_cleanups =
2235 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2236
2237 ecs = &ecss;
2238 memset (ecs, 0, sizeof (*ecs));
2239
2240 /* We'll update this if & when we switch to a new thread. */
2241 previous_inferior_ptid = inferior_ptid;
2242
2243 while (1)
2244 {
2245 struct cleanup *old_chain;
2246
2247 /* We have to invalidate the registers BEFORE calling target_wait
2248 because they can be loaded from the target while in target_wait.
2249 This makes remote debugging a bit more efficient for those
2250 targets that provide critical registers as part of their normal
2251 status mechanism. */
2252
2253 overlay_cache_invalid = 1;
2254 registers_changed ();
2255
2256 if (deprecated_target_wait_hook)
2257 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2258 else
2259 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2260
2261 if (debug_infrun)
2262 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2263
2264 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2265 {
2266 xfree (ecs->ws.value.execd_pathname);
2267 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2268 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2269 }
2270
2271 /* If an error happens while handling the event, propagate GDB's
2272 knowledge of the executing state to the frontend/user running
2273 state. */
2274 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2275
2276 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2277 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2278 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2279
2280 /* Now figure out what to do with the result of the wait. */
2281 handle_inferior_event (ecs);
2282
2283 /* No error, don't finish the state yet. */
2284 discard_cleanups (old_chain);
2285
2286 if (!ecs->wait_some_more)
2287 break;
2288 }
2289
2290 do_cleanups (old_cleanups);
2291 }
2292
2293 /* Asynchronous version of wait_for_inferior. It is called by the
2294 event loop whenever a change of state is detected on the file
2295 descriptor corresponding to the target. It can be called more than
2296 once to complete a single execution command. In such cases we need
2297 to keep the state in a global variable ECSS. If it is the last time
2298 that this function is called for a single execution command, then
2299 report to the user that the inferior has stopped, and do the
2300 necessary cleanups. */
2301
2302 void
2303 fetch_inferior_event (void *client_data)
2304 {
2305 struct execution_control_state ecss;
2306 struct execution_control_state *ecs = &ecss;
2307 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2308 struct cleanup *ts_old_chain;
2309 int was_sync = sync_execution;
2310
2311 memset (ecs, 0, sizeof (*ecs));
2312
2313 /* We'll update this if & when we switch to a new thread. */
2314 previous_inferior_ptid = inferior_ptid;
2315
2316 if (non_stop)
2317 /* In non-stop mode, the user/frontend should not notice a thread
2318 switch due to internal events. Make sure we revert to the
2319 user-selected thread and frame after handling the event and
2320 running any breakpoint commands. */
2321 make_cleanup_restore_current_thread ();
2322
2323 /* We have to invalidate the registers BEFORE calling target_wait
2324 because they can be loaded from the target while in target_wait.
2325 This makes remote debugging a bit more efficient for those
2326 targets that provide critical registers as part of their normal
2327 status mechanism. */
2328
2329 overlay_cache_invalid = 1;
2330 registers_changed ();
2331
2332 if (deprecated_target_wait_hook)
2333 ecs->ptid =
2334 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2335 else
2336 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2337
2338 if (debug_infrun)
2339 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2340
2341 if (non_stop
2342 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2343 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2344 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2345 /* In non-stop mode, each thread is handled individually. Switch
2346 early, so the global state is set correctly for this
2347 thread. */
2348 context_switch (ecs->ptid);
2349
2350 /* If an error happens while handling the event, propagate GDB's
2351 knowledge of the executing state to the frontend/user running
2352 state. */
2353 if (!non_stop)
2354 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2355 else
2356 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2357
2358 /* Now figure out what to do with the result of the wait. */
2359 handle_inferior_event (ecs);
2360
2361 if (!ecs->wait_some_more)
2362 {
2363 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2364
2365 delete_step_thread_step_resume_breakpoint ();
2366
2367 /* We may not find an inferior if this was a process exit. */
2368 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2369 normal_stop ();
2370
2371 if (target_has_execution
2372 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2373 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2374 && ecs->event_thread->step_multi
2375 && ecs->event_thread->stop_step)
2376 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2377 else
2378 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2379 }
2380
2381 /* No error, don't finish the thread states yet. */
2382 discard_cleanups (ts_old_chain);
2383
2384 /* Revert thread and frame. */
2385 do_cleanups (old_chain);
2386
2387 /* If the inferior was in sync execution mode, and now isn't,
2388 restore the prompt. */
2389 if (was_sync && !sync_execution)
2390 display_gdb_prompt (0);
2391 }
2392
2393 /* Record the frame and location we're currently stepping through. */
2394 void
2395 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2396 {
2397 struct thread_info *tp = inferior_thread ();
2398
2399 tp->step_frame_id = get_frame_id (frame);
2400 tp->step_stack_frame_id = get_stack_frame_id (frame);
2401
2402 tp->current_symtab = sal.symtab;
2403 tp->current_line = sal.line;
2404 }
2405
2406 /* Prepare an execution control state for looping through a
2407 wait_for_inferior-type loop. */
2408
2409 static void
2410 init_execution_control_state (struct execution_control_state *ecs)
2411 {
2412 ecs->random_signal = 0;
2413 }
2414
2415 /* Clear context switchable stepping state. */
2416
2417 void
2418 init_thread_stepping_state (struct thread_info *tss)
2419 {
2420 tss->stepping_over_breakpoint = 0;
2421 tss->step_after_step_resume_breakpoint = 0;
2422 tss->stepping_through_solib_after_catch = 0;
2423 tss->stepping_through_solib_catchpoints = NULL;
2424 }
2425
2426 /* Return the cached copy of the last pid/waitstatus returned by
2427 target_wait()/deprecated_target_wait_hook(). The data is actually
2428 cached by handle_inferior_event(), which gets called immediately
2429 after target_wait()/deprecated_target_wait_hook(). */
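/* For instance, prepare_to_proceed above uses this pair to check
   whether the most recent stop was a breakpoint trap:

     ptid_t wait_ptid;
     struct target_waitstatus wait_status;

     get_last_target_status (&wait_ptid, &wait_status);
     if (wait_status.kind == TARGET_WAITKIND_STOPPED
         && wait_status.value.sig == TARGET_SIGNAL_TRAP)
       ... */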
2430
2431 void
2432 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2433 {
2434 *ptidp = target_last_wait_ptid;
2435 *status = target_last_waitstatus;
2436 }
2437
2438 void
2439 nullify_last_target_wait_ptid (void)
2440 {
2441 target_last_wait_ptid = minus_one_ptid;
2442 }
2443
2444 /* Switch thread contexts. */
2445
2446 static void
2447 context_switch (ptid_t ptid)
2448 {
2449 if (debug_infrun)
2450 {
2451 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2452 target_pid_to_str (inferior_ptid));
2453 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2454 target_pid_to_str (ptid));
2455 }
2456
2457 switch_to_thread (ptid);
2458 }
2459
2460 static void
2461 adjust_pc_after_break (struct execution_control_state *ecs)
2462 {
2463 struct regcache *regcache;
2464 struct gdbarch *gdbarch;
2465 struct address_space *aspace;
2466 CORE_ADDR breakpoint_pc;
2467
2468 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2469 we aren't, just return.
2470
2471 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2472 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2473 implemented by software breakpoints should be handled through the normal
2474 breakpoint layer.
2475
2476 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2477 different signals (SIGILL or SIGEMT for instance), but it is less
2478 clear where the PC is pointing afterwards. It may not match
2479 gdbarch_decr_pc_after_break. I don't know any specific target that
2480 generates these signals at breakpoints (the code has been in GDB since at
2481 least 1992), so I cannot guess how to handle them here.
2482
2483 In earlier versions of GDB, a target with
2484 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2485 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2486 target with both of these set in GDB history, and it seems unlikely to be
2487 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2488
2489 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2490 return;
2491
2492 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2493 return;
2494
2495 /* In reverse execution, when a breakpoint is hit, the instruction
2496 under it has already been de-executed. The reported PC always
2497 points at the breakpoint address, so adjusting it further would
2498 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2499 architecture:
2500
2501 B1 0x08000000 : INSN1
2502 B2 0x08000001 : INSN2
2503 0x08000002 : INSN3
2504 PC -> 0x08000003 : INSN4
2505
2506 Say you're stopped at 0x08000003 as above. Reverse continuing
2507 from that point should hit B2 as below. Reading the PC when the
2508 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2509 been de-executed already.
2510
2511 B1 0x08000000 : INSN1
2512 B2 PC -> 0x08000001 : INSN2
2513 0x08000002 : INSN3
2514 0x08000003 : INSN4
2515
2516 We can't apply the same logic as for forward execution, because
2517 we would wrongly adjust the PC to 0x08000000, since there's a
2518 breakpoint at PC - 1. We'd then report a hit on B1, although
2519 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2520 behaviour. */
2521 if (execution_direction == EXEC_REVERSE)
2522 return;
2523
2524 /* If this target does not decrement the PC after breakpoints, then
2525 we have nothing to do. */
2526 regcache = get_thread_regcache (ecs->ptid);
2527 gdbarch = get_regcache_arch (regcache);
2528 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2529 return;
2530
2531 aspace = get_regcache_aspace (regcache);
2532
2533 /* Find the location where (if we've hit a breakpoint) the
2534 breakpoint would be. */
2535 breakpoint_pc = regcache_read_pc (regcache)
2536 - gdbarch_decr_pc_after_break (gdbarch);
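/* (Illustrative example: on a target whose software breakpoint is a
   single one-byte trap instruction and whose decr_pc_after_break is
   1 -- x86's int3 is the usual case -- a reported PC of 0x8048401
   means the breakpoint itself sits at 0x8048400.) */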
2537
2538 /* Check whether there actually is a software breakpoint inserted at
2539 that location.
2540
2541 If in non-stop mode, a race condition is possible where we've
2542 removed a breakpoint, but stop events for that breakpoint were
2543 already queued and arrive later. To suppress those spurious
2544 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2545 and retire them after a number of stop events are reported. */
2546 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2547 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2548 {
2549 struct cleanup *old_cleanups = NULL;
2550 if (RECORD_IS_USED)
2551 old_cleanups = record_gdb_operation_disable_set ();
2552
2553 /* When using hardware single-step, a SIGTRAP is reported for both
2554 a completed single-step and a software breakpoint. We need to
2555 differentiate between the two, as the latter needs adjusting
2556 but the former does not.
2557
2558 The SIGTRAP can be due to a completed hardware single-step only if
2559 - we didn't insert software single-step breakpoints
2560 - the thread to be examined is still the current thread
2561 - this thread is currently being stepped
2562
2563 If any of these events did not occur, we must have stopped due
2564 to hitting a software breakpoint, and have to back up to the
2565 breakpoint address.
2566
2567 As a special case, we could have hardware single-stepped a
2568 software breakpoint. In this case (prev_pc == breakpoint_pc),
2569 we also need to back up to the breakpoint address. */
2570
2571 if (singlestep_breakpoints_inserted_p
2572 || !ptid_equal (ecs->ptid, inferior_ptid)
2573 || !currently_stepping (ecs->event_thread)
2574 || ecs->event_thread->prev_pc == breakpoint_pc)
2575 regcache_write_pc (regcache, breakpoint_pc);
2576
2577 if (RECORD_IS_USED)
2578 do_cleanups (old_cleanups);
2579 }
2580 }
2581
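/* Reset the inferior-wait state: wait on any ptid next time, and go
   back to the normal (non-thread-hop, non-watchpoint-step) state. */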
2582 void
2583 init_infwait_state (void)
2584 {
2585 waiton_ptid = pid_to_ptid (-1);
2586 infwait_state = infwait_normal_state;
2587 }
2588
2589 void
2590 error_is_running (void)
2591 {
2592 error (_("\
2593 Cannot execute this command while the selected thread is running."));
2594 }
2595
2596 void
2597 ensure_not_running (void)
2598 {
2599 if (is_running (inferior_ptid))
2600 error_is_running ();
2601 }
2602
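/* Return non-zero if the frame whose id is STEP_FRAME_ID encloses
   FRAME with only inline frames in between, i.e. FRAME appears to
   have been stepped into from that frame without any real
   (non-inlined) call in between. */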
2603 static int
2604 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2605 {
2606 for (frame = get_prev_frame (frame);
2607 frame != NULL;
2608 frame = get_prev_frame (frame))
2609 {
2610 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2611 return 1;
2612 if (get_frame_type (frame) != INLINE_FRAME)
2613 break;
2614 }
2615
2616 return 0;
2617 }
2618
2619 /* Auxiliary function that handles syscall entry/return events.
2620 It returns 1 if the inferior should keep going (and GDB
2621 should ignore the event), or 0 if the event deserves to be
2622 processed. */
2623
2624 static int
2625 handle_syscall_event (struct execution_control_state *ecs)
2626 {
2627 struct regcache *regcache;
2628 struct gdbarch *gdbarch;
2629 int syscall_number;
2630
2631 if (!ptid_equal (ecs->ptid, inferior_ptid))
2632 context_switch (ecs->ptid);
2633
2634 regcache = get_thread_regcache (ecs->ptid);
2635 gdbarch = get_regcache_arch (regcache);
2636 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2637 stop_pc = regcache_read_pc (regcache);
2638
2639 target_last_waitstatus.value.syscall_number = syscall_number;
2640
2641 if (catch_syscall_enabled () > 0
2642 && catching_syscall_number (syscall_number) > 0)
2643 {
2644 if (debug_infrun)
2645 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2646 syscall_number);
2647
2648 ecs->event_thread->stop_bpstat
2649 = bpstat_stop_status (get_regcache_aspace (regcache),
2650 stop_pc, ecs->ptid);
2651 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2652
2653 if (!ecs->random_signal)
2654 {
2655 /* Catchpoint hit. */
2656 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2657 return 0;
2658 }
2659 }
2660
2661 /* If no catchpoint triggered for this, then keep going. */
2662 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2663 keep_going (ecs);
2664 return 1;
2665 }
2666
2667 /* Given an execution control state that has been freshly filled in
2668 by an event from the inferior, figure out what it means and take
2669 appropriate action. */
2670
2671 static void
2672 handle_inferior_event (struct execution_control_state *ecs)
2673 {
2674 struct frame_info *frame;
2675 struct gdbarch *gdbarch;
2676 int sw_single_step_trap_p = 0;
2677 int stopped_by_watchpoint;
2678 int stepped_after_stopped_by_watchpoint = 0;
2679 struct symtab_and_line stop_pc_sal;
2680 enum stop_kind stop_soon;
2681
2682 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2683 {
2684 /* We had an event in the inferior, but we are not interested in
2685 handling it at this level. The lower layers have already
2686 done what needs to be done, if anything.
2687
2688 One of the possible circumstances for this is when the
2689 inferior produces output for the console. The inferior has
2690 not stopped, and we are ignoring the event. Another possible
2691 circumstance is any event which the lower level knows will be
2692 reported multiple times without an intervening resume. */
2693 if (debug_infrun)
2694 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2695 prepare_to_wait (ecs);
2696 return;
2697 }
2698
2699 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2700 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2701 {
2702 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2703 gdb_assert (inf);
2704 stop_soon = inf->stop_soon;
2705 }
2706 else
2707 stop_soon = NO_STOP_QUIETLY;
2708
2709 /* Cache the last pid/waitstatus. */
2710 target_last_wait_ptid = ecs->ptid;
2711 target_last_waitstatus = ecs->ws;
2712
2713 /* Always clear state belonging to the previous time we stopped. */
2714 stop_stack_dummy = 0;
2715
2716 /* If it's a new process, add it to the thread database. */
2717
2718 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2719 && !ptid_equal (ecs->ptid, minus_one_ptid)
2720 && !in_thread_list (ecs->ptid));
2721
2722 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2723 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2724 add_thread (ecs->ptid);
2725
2726 ecs->event_thread = find_thread_ptid (ecs->ptid);
2727
2728 /* Dependent on valid ECS->EVENT_THREAD. */
2729 adjust_pc_after_break (ecs);
2730
2731 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2732 reinit_frame_cache ();
2733
2734 breakpoint_retire_moribund ();
2735
2736 /* Mark the non-executing threads accordingly. In all-stop, all
2737 threads of all processes are stopped when we get any event
2738 reported. In non-stop mode, only the event thread stops. If
2739 we're handling a process exit in non-stop mode, there's nothing
2740 to do, as threads of the dead process are gone, and threads of
2741 any other process were left running. */
2742 if (!non_stop)
2743 set_executing (minus_one_ptid, 0);
2744 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2745 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2746 set_executing (inferior_ptid, 0);
2747
2748 switch (infwait_state)
2749 {
2750 case infwait_thread_hop_state:
2751 if (debug_infrun)
2752 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2753 break;
2754
2755 case infwait_normal_state:
2756 if (debug_infrun)
2757 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2758 break;
2759
2760 case infwait_step_watch_state:
2761 if (debug_infrun)
2762 fprintf_unfiltered (gdb_stdlog,
2763 "infrun: infwait_step_watch_state\n");
2764
2765 stepped_after_stopped_by_watchpoint = 1;
2766 break;
2767
2768 case infwait_nonstep_watch_state:
2769 if (debug_infrun)
2770 fprintf_unfiltered (gdb_stdlog,
2771 "infrun: infwait_nonstep_watch_state\n");
2772 insert_breakpoints ();
2773
2774 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2775 handle things like signals arriving and other things happening
2776 in combination correctly? */
2777 stepped_after_stopped_by_watchpoint = 1;
2778 break;
2779
2780 default:
2781 internal_error (__FILE__, __LINE__, _("bad switch"));
2782 }
2783
2784 infwait_state = infwait_normal_state;
2785 waiton_ptid = pid_to_ptid (-1);
2786
2787 switch (ecs->ws.kind)
2788 {
2789 case TARGET_WAITKIND_LOADED:
2790 if (debug_infrun)
2791 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2792 /* Ignore gracefully during startup of the inferior, as it might
2793 be the shell which has just loaded some objects; otherwise,
2794 add the symbols for the newly loaded objects. Also ignore at
2795 the beginning of an attach or remote session; we will query
2796 the full list of libraries once the connection is
2797 established. */
2798 if (stop_soon == NO_STOP_QUIETLY)
2799 {
2800 /* Check for any newly added shared libraries if we're
2801 supposed to be adding them automatically. Switch
2802 terminal for any messages produced by
2803 breakpoint_re_set. */
2804 target_terminal_ours_for_output ();
2805 /* NOTE: cagney/2003-11-25: Make certain that the target
2806 stack's section table is kept up-to-date. Architectures,
2807 (e.g., PPC64), use the section table to perform
2808 operations such as address => section name and hence
2809 require the table to contain all sections (including
2810 those found in shared libraries). */
2811 #ifdef SOLIB_ADD
2812 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2813 #else
2814 solib_add (NULL, 0, &current_target, auto_solib_add);
2815 #endif
2816 target_terminal_inferior ();
2817
2818 /* If requested, stop when the dynamic linker notifies
2819 gdb of events. This allows the user to get control
2820 and place breakpoints in initializer routines for
2821 dynamically loaded objects (among other things). */
2822 if (stop_on_solib_events)
2823 {
2824 /* Make sure we print "Stopped due to solib-event" in
2825 normal_stop. */
2826 stop_print_frame = 1;
2827
2828 stop_stepping (ecs);
2829 return;
2830 }
2831
2832 /* NOTE drow/2007-05-11: This might be a good place to check
2833 for "catch load". */
2834 }
2835
2836 /* If we are skipping through a shell, or through shared library
2837 loading that we aren't interested in, resume the program. If
2838 we're running the program normally, also resume. But stop if
2839 we're attaching or setting up a remote connection. */
2840 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2841 {
2842 /* Loading of shared libraries might have changed breakpoint
2843 addresses. Make sure new breakpoints are inserted. */
2844 if (stop_soon == NO_STOP_QUIETLY
2845 && !breakpoints_always_inserted_mode ())
2846 insert_breakpoints ();
2847 resume (0, TARGET_SIGNAL_0);
2848 prepare_to_wait (ecs);
2849 return;
2850 }
2851
2852 break;
2853
2854 case TARGET_WAITKIND_SPURIOUS:
2855 if (debug_infrun)
2856 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2857 resume (0, TARGET_SIGNAL_0);
2858 prepare_to_wait (ecs);
2859 return;
2860
2861 case TARGET_WAITKIND_EXITED:
2862 if (debug_infrun)
2863 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2864 inferior_ptid = ecs->ptid;
2865 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2866 set_current_program_space (current_inferior ()->pspace);
2867 handle_vfork_child_exec_or_exit (0);
2868 target_terminal_ours (); /* Must do this before mourn anyway. */
2869 print_stop_reason (EXITED, ecs->ws.value.integer);
2870
2871 /* Record the exit code in the convenience variable $_exitcode, so
2872 that the user can inspect this again later. */
2873 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2874 (LONGEST) ecs->ws.value.integer);
2875 gdb_flush (gdb_stdout);
2876 target_mourn_inferior ();
2877 singlestep_breakpoints_inserted_p = 0;
2878 stop_print_frame = 0;
2879 stop_stepping (ecs);
2880 return;
2881
2882 case TARGET_WAITKIND_SIGNALLED:
2883 if (debug_infrun)
2884 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2885 inferior_ptid = ecs->ptid;
2886 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2887 set_current_program_space (current_inferior ()->pspace);
2888 handle_vfork_child_exec_or_exit (0);
2889 stop_print_frame = 0;
2890 target_terminal_ours (); /* Must do this before mourn anyway. */
2891
2892 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2893 reach here unless the inferior is dead. However, for years
2894 target_kill() was called here, which hints that fatal signals aren't
2895 really fatal on some systems. If that's true, then some changes
2896 may be needed. */
2897 target_mourn_inferior ();
2898
2899 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2900 singlestep_breakpoints_inserted_p = 0;
2901 stop_stepping (ecs);
2902 return;
2903
2904 /* The following are the only cases in which we keep going;
2905 the above cases end in a continue or goto. */
2906 case TARGET_WAITKIND_FORKED:
2907 case TARGET_WAITKIND_VFORKED:
2908 if (debug_infrun)
2909 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2910
2911 if (!ptid_equal (ecs->ptid, inferior_ptid))
2912 {
2913 context_switch (ecs->ptid);
2914 reinit_frame_cache ();
2915 }
2916
2917 /* Immediately detach breakpoints from the child before there's
2918 any chance of letting the user delete breakpoints from the
2919 breakpoint lists. If we don't do this early, it's easy to
2920 leave leftover traps in the child, viz: "break foo; catch
2921 fork; c; <fork>; del; c; <child calls foo>". We only follow
2922 the fork on the last `continue', and by that time the
2923 breakpoint at "foo" is long gone from the breakpoint table.
2924 If we vforked, then we don't need to unpatch here, since both
2925 parent and child are sharing the same memory pages; we'll
2926 need to unpatch at follow/detach time instead to be certain
2927 that new breakpoints added between catchpoint hit time and
2928 vfork follow are detached. */
2929 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2930 {
2931 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2932
2933 /* This won't actually modify the breakpoint list, but will
2934 physically remove the breakpoints from the child. */
2935 detach_breakpoints (child_pid);
2936 }
2937
2938 /* In case the event is caught by a catchpoint, remember that
2939 the event is to be followed at the next resume of the thread,
2940 and not immediately. */
2941 ecs->event_thread->pending_follow = ecs->ws;
2942
2943 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2944
2945 ecs->event_thread->stop_bpstat
2946 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
2947 stop_pc, ecs->ptid);
2948
2949 /* Note that we're interested in knowing whether the bpstat actually
2950 causes a stop, not just whether it may explain the signal.
2951 Software watchpoints, for example, always appear in the
2952 bpstat. */
2953 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
2954
2955 /* If no catchpoint triggered for this, then keep going. */
2956 if (ecs->random_signal)
2957 {
2958 ptid_t parent;
2959 ptid_t child;
2960 int should_resume;
2961 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
2962
2963 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2964
2965 should_resume = follow_fork ();
2966
2967 parent = ecs->ptid;
2968 child = ecs->ws.value.related_pid;
2969
2970 /* In non-stop mode, also resume the other branch. */
2971 if (non_stop && !detach_fork)
2972 {
2973 if (follow_child)
2974 switch_to_thread (parent);
2975 else
2976 switch_to_thread (child);
2977
2978 ecs->event_thread = inferior_thread ();
2979 ecs->ptid = inferior_ptid;
2980 keep_going (ecs);
2981 }
2982
2983 if (follow_child)
2984 switch_to_thread (child);
2985 else
2986 switch_to_thread (parent);
2987
2988 ecs->event_thread = inferior_thread ();
2989 ecs->ptid = inferior_ptid;
2990
2991 if (should_resume)
2992 keep_going (ecs);
2993 else
2994 stop_stepping (ecs);
2995 return;
2996 }
2997 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2998 goto process_event_stop_test;
2999
3000 case TARGET_WAITKIND_VFORK_DONE:
3001 /* Done with the shared memory region. Re-insert breakpoints in
3002 the parent, and keep going. */
3003
3004 if (debug_infrun)
3005 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3006
3007 if (!ptid_equal (ecs->ptid, inferior_ptid))
3008 context_switch (ecs->ptid);
3009
3010 current_inferior ()->waiting_for_vfork_done = 0;
3011 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3012 /* This also takes care of reinserting breakpoints in the
3013 previously locked inferior. */
3014 keep_going (ecs);
3015 return;
3016
3017 case TARGET_WAITKIND_EXECD:
3018 if (debug_infrun)
3019 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3020
3021 if (!ptid_equal (ecs->ptid, inferior_ptid))
3022 {
3023 context_switch (ecs->ptid);
3024 reinit_frame_cache ();
3025 }
3026
3027 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3028
3029 /* Do whatever is necessary to the parent branch of the vfork. */
3030 handle_vfork_child_exec_or_exit (1);
3031
3032 /* This causes the eventpoints and symbol table to be reset.
3033 Must do this now, before trying to determine whether to
3034 stop. */
3035 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3036
3037 ecs->event_thread->stop_bpstat
3038 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3039 stop_pc, ecs->ptid);
3040 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3041
3042 /* Note that this may be referenced from inside
3043 bpstat_stop_status above, through inferior_has_execd. */
3044 xfree (ecs->ws.value.execd_pathname);
3045 ecs->ws.value.execd_pathname = NULL;
3046
3047 /* If no catchpoint triggered for this, then keep going. */
3048 if (ecs->random_signal)
3049 {
3050 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3051 keep_going (ecs);
3052 return;
3053 }
3054 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3055 goto process_event_stop_test;
3056
3057 /* Be careful not to try to gather much state about a thread
3058 that's in a syscall. It's frequently a losing proposition. */
3059 case TARGET_WAITKIND_SYSCALL_ENTRY:
3060 if (debug_infrun)
3061 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3062 /* Get the current syscall number. */
3063 if (handle_syscall_event (ecs) != 0)
3064 return;
3065 goto process_event_stop_test;
3066
3067 /* Before examining the threads further, step this thread to
3068 get it entirely out of the syscall. (We get notice of the
3069 event when the thread is just on the verge of exiting a
3070 syscall. Stepping one instruction seems to get it back
3071 into user code.) */
3072 case TARGET_WAITKIND_SYSCALL_RETURN:
3073 if (debug_infrun)
3074 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3075 if (handle_syscall_event (ecs) != 0)
3076 return;
3077 goto process_event_stop_test;
3078
3079 case TARGET_WAITKIND_STOPPED:
3080 if (debug_infrun)
3081 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3082 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3083 break;
3084
3085 case TARGET_WAITKIND_NO_HISTORY:
3086 /* Reverse execution: target ran out of history info. */
3087 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3088 print_stop_reason (NO_HISTORY, 0);
3089 stop_stepping (ecs);
3090 return;
3091 }
3092
3093 if (ecs->new_thread_event)
3094 {
3095 if (non_stop)
3096 /* Non-stop assumes that the target handles adding new threads
3097 to the thread list. */
3098 internal_error (__FILE__, __LINE__, "\
3099 targets should add new threads to the thread list themselves in non-stop mode.");
3100
3101 /* We may want to consider not doing a resume here in order to
3102 give the user a chance to play with the new thread. It might
3103 be good to make that a user-settable option. */
3104
3105 /* At this point, all threads are stopped (happens automatically
3106 in either the OS or the native code). Therefore we need to
3107 continue all threads in order to make progress. */
3108
3109 if (!ptid_equal (ecs->ptid, inferior_ptid))
3110 context_switch (ecs->ptid);
3111 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3112 prepare_to_wait (ecs);
3113 return;
3114 }
3115
3116 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3117 {
3118 /* Do we need to clean up the state of a thread that has
3119 completed a displaced single-step? (Doing so usually affects
3120 the PC, so do it here, before we set stop_pc.) */
3121 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3122
3123 /* If we either finished a single-step or hit a breakpoint, but
3124 the user wanted this thread to be stopped, pretend we got a
3125 SIG0 (generic unsignaled stop). */
3126
3127 if (ecs->event_thread->stop_requested
3128 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3129 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3130 }
3131
3132 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3133
3134 if (debug_infrun)
3135 {
3136 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3137 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3138 struct cleanup *old_chain = save_inferior_ptid ();
3139
3140 inferior_ptid = ecs->ptid;
3141
3142 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3143 paddress (gdbarch, stop_pc));
3144 if (target_stopped_by_watchpoint ())
3145 {
3146 CORE_ADDR addr;
3147 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3148
3149 if (target_stopped_data_address (&current_target, &addr))
3150 fprintf_unfiltered (gdb_stdlog,
3151 "infrun: stopped data address = %s\n",
3152 paddress (gdbarch, addr));
3153 else
3154 fprintf_unfiltered (gdb_stdlog,
3155 "infrun: (no data address available)\n");
3156 }
3157
3158 do_cleanups (old_chain);
3159 }
3160
3161 if (stepping_past_singlestep_breakpoint)
3162 {
3163 gdb_assert (singlestep_breakpoints_inserted_p);
3164 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3165 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3166
3167 stepping_past_singlestep_breakpoint = 0;
3168
3169 /* We've either finished single-stepping past the single-step
3170 breakpoint, or stopped for some other reason. It would be nice if
3171 we could tell, but we can't reliably. */
3172 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3173 {
3174 if (debug_infrun)
3175 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3176 /* Pull the single step breakpoints out of the target. */
3177 remove_single_step_breakpoints ();
3178 singlestep_breakpoints_inserted_p = 0;
3179
3180 ecs->random_signal = 0;
3181 ecs->event_thread->trap_expected = 0;
3182
3183 context_switch (saved_singlestep_ptid);
3184 if (deprecated_context_hook)
3185 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3186
3187 resume (1, TARGET_SIGNAL_0);
3188 prepare_to_wait (ecs);
3189 return;
3190 }
3191 }
3192
3193 if (!ptid_equal (deferred_step_ptid, null_ptid))
3194 {
3195 /* In non-stop mode, there's never a deferred_step_ptid set. */
3196 gdb_assert (!non_stop);
3197
3198 /* If we stopped for some other reason than single-stepping, ignore
3199 the fact that we were supposed to switch back. */
3200 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3201 {
3202 if (debug_infrun)
3203 fprintf_unfiltered (gdb_stdlog,
3204 "infrun: handling deferred step\n");
3205
3206 /* Pull the single step breakpoints out of the target. */
3207 if (singlestep_breakpoints_inserted_p)
3208 {
3209 remove_single_step_breakpoints ();
3210 singlestep_breakpoints_inserted_p = 0;
3211 }
3212
3213 /* Note: We do not call context_switch at this point, as the
3214 context is already set up for stepping the original thread. */
3215 switch_to_thread (deferred_step_ptid);
3216 deferred_step_ptid = null_ptid;
3217 /* Suppress spurious "Switching to ..." message. */
3218 previous_inferior_ptid = inferior_ptid;
3219
3220 resume (1, TARGET_SIGNAL_0);
3221 prepare_to_wait (ecs);
3222 return;
3223 }
3224
3225 deferred_step_ptid = null_ptid;
3226 }
3227
3228 /* See if a thread hit a thread-specific breakpoint that was meant for
3229 another thread. If so, then step that thread past the breakpoint,
3230 and continue it. */
3231
3232 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3233 {
3234 int thread_hop_needed = 0;
3235 struct address_space *aspace =
3236 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3237
3238 /* Check if a regular breakpoint has been hit before checking
3239 for a potential single step breakpoint. Otherwise, GDB will
3240 not see this breakpoint hit when stepping onto breakpoints. */
3241 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3242 {
3243 ecs->random_signal = 0;
3244 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3245 thread_hop_needed = 1;
3246 }
3247 else if (singlestep_breakpoints_inserted_p)
3248 {
3249 /* We have not context switched yet, so this should be true
3250 no matter which thread hit the singlestep breakpoint. */
3251 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3252 if (debug_infrun)
3253 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3254 "trap for %s\n",
3255 target_pid_to_str (ecs->ptid));
3256
3257 ecs->random_signal = 0;
3258 /* The call to in_thread_list is necessary because PTIDs sometimes
3259 change when we go from single-threaded to multi-threaded. If
3260 the singlestep_ptid is still in the list, assume that it is
3261 really different from ecs->ptid. */
3262 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3263 && in_thread_list (singlestep_ptid))
3264 {
3265 /* If the PC of the thread we were trying to single-step
3266 has changed, discard this event (which we were going
3267 to ignore anyway), and pretend we saw that thread
3268 trap. This prevents us continuously moving the
3269 single-step breakpoint forward, one instruction at a
3270 time. If the PC has changed, then the thread we were
3271 trying to single-step has trapped or been signalled,
3272 but the event has not been reported to GDB yet.
3273
3274 There might be some cases where this loses signal
3275 information, if a signal has arrived at exactly the
3276 same time that the PC changed, but this is the best
3277 we can do with the information available. Perhaps we
3278 should arrange to report all events for all threads
3279 when they stop, or to re-poll the remote looking for
3280 this particular thread (i.e. temporarily enable
3281 schedlock). */
3282
3283 CORE_ADDR new_singlestep_pc
3284 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3285
3286 if (new_singlestep_pc != singlestep_pc)
3287 {
3288 enum target_signal stop_signal;
3289
3290 if (debug_infrun)
3291 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3292 " but expected thread advanced also\n");
3293
3294 /* The current context still belongs to
3295 singlestep_ptid. Don't swap here, since that's
3296 the context we want to use. Just fudge our
3297 state and continue. */
3298 stop_signal = ecs->event_thread->stop_signal;
3299 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3300 ecs->ptid = singlestep_ptid;
3301 ecs->event_thread = find_thread_ptid (ecs->ptid);
3302 ecs->event_thread->stop_signal = stop_signal;
3303 stop_pc = new_singlestep_pc;
3304 }
3305 else
3306 {
3307 if (debug_infrun)
3308 fprintf_unfiltered (gdb_stdlog,
3309 "infrun: unexpected thread\n");
3310
3311 thread_hop_needed = 1;
3312 stepping_past_singlestep_breakpoint = 1;
3313 saved_singlestep_ptid = singlestep_ptid;
3314 }
3315 }
3316 }
3317
3318 if (thread_hop_needed)
3319 {
3320 struct regcache *thread_regcache;
3321 int remove_status = 0;
3322
3323 if (debug_infrun)
3324 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3325
3326 /* Switch context before touching inferior memory, the
3327 previous thread may have exited. */
3328 if (!ptid_equal (inferior_ptid, ecs->ptid))
3329 context_switch (ecs->ptid);
3330
3331 /* Saw a breakpoint, but it was hit by the wrong thread.
3332 Just continue. */
3333
3334 if (singlestep_breakpoints_inserted_p)
3335 {
3336 /* Pull the single step breakpoints out of the target. */
3337 remove_single_step_breakpoints ();
3338 singlestep_breakpoints_inserted_p = 0;
3339 }
3340
3341 /* If the arch can displace step, don't remove the
3342 breakpoints. */
3343 thread_regcache = get_thread_regcache (ecs->ptid);
3344 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3345 remove_status = remove_breakpoints ();
3346
3347 /* Did we fail to remove breakpoints? If so, try
3348 to set the PC past the bp. (There's at least
3349 one situation in which we can fail to remove
3350 the bp's: On HP-UX's that use ttrace, we can't
3351 change the address space of a vforking child
3352 process until the child exits (well, okay, not
3353 then either :-) or execs.) */
3354 if (remove_status != 0)
3355 error (_("Cannot step over breakpoint hit in wrong thread"));
3356 else
3357 { /* Single step */
3358 if (!non_stop)
3359 {
3360 /* Only need to require the next event from this
3361 thread in all-stop mode. */
3362 waiton_ptid = ecs->ptid;
3363 infwait_state = infwait_thread_hop_state;
3364 }
3365
3366 ecs->event_thread->stepping_over_breakpoint = 1;
3367 keep_going (ecs);
3368 return;
3369 }
3370 }
3371 else if (singlestep_breakpoints_inserted_p)
3372 {
3373 sw_single_step_trap_p = 1;
3374 ecs->random_signal = 0;
3375 }
3376 }
3377 else
3378 ecs->random_signal = 1;
3379
3380 /* See if something interesting happened to the non-current thread. If
3381 so, then switch to that thread. */
3382 if (!ptid_equal (ecs->ptid, inferior_ptid))
3383 {
3384 if (debug_infrun)
3385 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3386
3387 context_switch (ecs->ptid);
3388
3389 if (deprecated_context_hook)
3390 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3391 }
3392
3393 /* At this point, get hold of the now-current thread's frame. */
3394 frame = get_current_frame ();
3395 gdbarch = get_frame_arch (frame);
3396
3397 if (singlestep_breakpoints_inserted_p)
3398 {
3399 /* Pull the single step breakpoints out of the target. */
3400 remove_single_step_breakpoints ();
3401 singlestep_breakpoints_inserted_p = 0;
3402 }
3403
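/* Figure out whether a watchpoint triggered at this stop. If this
   event is the single step we requested after a watchpoint hit, don't
   treat it as a new watchpoint trap; otherwise query the target. */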
3404 if (stepped_after_stopped_by_watchpoint)
3405 stopped_by_watchpoint = 0;
3406 else
3407 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3408
3409 /* If necessary, step over this watchpoint. We'll be back to display
3410 it in a moment. */
3411 if (stopped_by_watchpoint
3412 && (target_have_steppable_watchpoint
3413 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3414 {
3415 /* At this point, we are stopped at an instruction which has
3416 attempted to write to a piece of memory under control of
3417 a watchpoint. The instruction hasn't actually executed
3418 yet. If we were to evaluate the watchpoint expression
3419 now, we would get the old value, and therefore no change
3420 would seem to have occurred.
3421
3422 In order to make watchpoints work `right', we really need
3423 to complete the memory write, and then evaluate the
3424 watchpoint expression. We do this by single-stepping the
3425 target.
3426
3427 It may not be necessary to disable the watchpoint to step over
3428 it. For example, the PA can (with some kernel cooperation)
3429 single step over a watchpoint without disabling the watchpoint.
3430
3431 It is far more common to need to disable a watchpoint to step
3432 the inferior over it. If we have non-steppable watchpoints,
3433 we must disable the current watchpoint; it's simplest to
3434 disable all watchpoints and breakpoints. */
3435 int hw_step = 1;
3436
3437 if (!target_have_steppable_watchpoint)
3438 remove_breakpoints ();
3439 /* Single step */
3440 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3441 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3442 waiton_ptid = ecs->ptid;
3443 if (target_have_steppable_watchpoint)
3444 infwait_state = infwait_step_watch_state;
3445 else
3446 infwait_state = infwait_nonstep_watch_state;
3447 prepare_to_wait (ecs);
3448 return;
3449 }
3450
3451 ecs->stop_func_start = 0;
3452 ecs->stop_func_end = 0;
3453 ecs->stop_func_name = 0;
3454 /* Don't care about return value; stop_func_start and stop_func_name
3455 will both be 0 if it doesn't work. */
3456 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3457 &ecs->stop_func_start, &ecs->stop_func_end);
3458 ecs->stop_func_start
3459 += gdbarch_deprecated_function_start_offset (gdbarch);
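/* Reset the per-event state before working out why the inferior
   stopped and how to respond. */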
3460 ecs->event_thread->stepping_over_breakpoint = 0;
3461 bpstat_clear (&ecs->event_thread->stop_bpstat);
3462 ecs->event_thread->stop_step = 0;
3463 stop_print_frame = 1;
3464 ecs->random_signal = 0;
3465 stopped_by_random_signal = 0;
3466
3467 /* Hide inlined functions starting here, unless we just performed stepi or
3468 nexti. After stepi and nexti, always show the innermost frame (not any
3469 inline function call sites). */
3470 if (ecs->event_thread->step_range_end != 1)
3471 skip_inline_frames (ecs->ptid);
3472
3473 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3474 && ecs->event_thread->trap_expected
3475 && gdbarch_single_step_through_delay_p (gdbarch)
3476 && currently_stepping (ecs->event_thread))
3477 {
3478 /* We're trying to step off a breakpoint. Turns out that we're
3479 also on an instruction that needs to be stepped multiple
3480 times before it has been fully executed. E.g., architectures
3481 with a delay slot. It needs to be stepped twice, once for
3482 the instruction and once for the delay slot. */
3483 int step_through_delay
3484 = gdbarch_single_step_through_delay (gdbarch, frame);
3485 if (debug_infrun && step_through_delay)
3486 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3487 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3488 {
3489 /* The user issued a continue when stopped at a breakpoint.
3490 Set up for another trap and get out of here. */
3491 ecs->event_thread->stepping_over_breakpoint = 1;
3492 keep_going (ecs);
3493 return;
3494 }
3495 else if (step_through_delay)
3496 {
3497 /* The user issued a step when stopped at a breakpoint.
3498 Maybe we should stop, maybe we should not - the delay
3499 slot *might* correspond to a line of source. In any
3500 case, don't decide that here, just set
3501 ecs->stepping_over_breakpoint, making sure we
3502 single-step again before breakpoints are re-inserted. */
3503 ecs->event_thread->stepping_over_breakpoint = 1;
3504 }
3505 }
3506
3507 /* Look at the cause of the stop, and decide what to do.
3508 The alternatives are:
3509 1) stop_stepping and return; to really stop and return to the debugger,
3510 2) keep_going and return to start up again
3511 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3512 3) set ecs->random_signal to 1, and the decision between 1 and 2
3513 will be made according to the signal handling tables. */
3514
3515 /* First, distinguish signals caused by the debugger from signals
3516 that have to do with the program's own actions. Note that
3517 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3518 on the operating system version. Here we detect when a SIGILL or
3519 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3520 something similar for SIGSEGV, since a SIGSEGV will be generated
3521 when we're trying to execute a breakpoint instruction on a
3522 non-executable stack. This happens for call dummy breakpoints
3523 for architectures like SPARC that place call dummies on the
3524 stack.
3525
3526 If we're doing a displaced step past a breakpoint, then the
3527 breakpoint is always inserted at the original instruction;
3528 non-standard signals can't be explained by the breakpoint. */
3529 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3530 || (! ecs->event_thread->trap_expected
3531 && breakpoint_inserted_here_p (get_regcache_aspace (get_current_regcache ()),
3532 stop_pc)
3533 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_ILL
3534 || ecs->event_thread->stop_signal == TARGET_SIGNAL_SEGV
3535 || ecs->event_thread->stop_signal == TARGET_SIGNAL_EMT))
3536 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3537 || stop_soon == STOP_QUIETLY_REMOTE)
3538 {
3539 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3540 {
3541 if (debug_infrun)
3542 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3543 stop_print_frame = 0;
3544 stop_stepping (ecs);
3545 return;
3546 }
3547
3548 /* This originates from start_remote(), start_inferior() and
3549 shared library hook functions. */
3550 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3551 {
3552 if (debug_infrun)
3553 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3554 stop_stepping (ecs);
3555 return;
3556 }
3557
3558 /* This originates from attach_command(). We need to overwrite
3559 the stop_signal here, because some kernels don't ignore a
3560 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3561 See more comments in inferior.h. On the other hand, if we
3562 get a non-SIGSTOP, report it to the user - assume the backend
3563 will handle the SIGSTOP if it should show up later.
3564
3565 Also consider that the attach is complete when we see a
3566 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3567 target extended-remote report it instead of a SIGSTOP
3568 (e.g. gdbserver). We already rely on SIGTRAP being our
3569 signal, so this is no exception.
3570
3571 Also consider that the attach is complete when we see a
3572 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3573 the target to stop all threads of the inferior, in case the
3574 low level attach operation doesn't stop them implicitly. If
3575 they weren't stopped implicitly, then the stub will report a
3576 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3577 other than GDB's request. */
3578 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3579 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3580 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3581 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3582 {
3583 stop_stepping (ecs);
3584 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3585 return;
3586 }
3587
3588 /* See if there is a breakpoint at the current PC. */
3589 ecs->event_thread->stop_bpstat
3590 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3591 stop_pc, ecs->ptid);
3592
3593 /* The following is needed in case a breakpoint condition
3594 called a function. */
3595 stop_print_frame = 1;
3596
3597 /* This is where we handle "moribund" watchpoints. Unlike
3598 software breakpoint traps, hardware watchpoint traps are
3599 always distinguishable from random traps. If no high-level
3600 watchpoint is associated with the reported stop data address
3601 anymore, then the bpstat does not explain the signal ---
3602 simply make sure to ignore it if `stopped_by_watchpoint' is
3603 set. */
3604
3605 if (debug_infrun
3606 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3607 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3608 && stopped_by_watchpoint)
3609 fprintf_unfiltered (gdb_stdlog, "\
3610 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3611
3612 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3613 at one stage in the past included checks for an inferior
3614 function call's call dummy's return breakpoint. The original
3615 comment, that went with the test, read:
3616
3617 ``End of a stack dummy. Some systems (e.g. Sony news) give
3618 another signal besides SIGTRAP, so check here as well as
3619 above.''
3620
3621 If someone ever tries to get call dummies on a
3622 non-executable stack to work (where the target would stop
3623 with something like a SIGSEGV), then those tests might need
3624 to be re-instated. Given, however, that the tests were only
3625 enabled when momentary breakpoints were not being used, I
3626 suspect that it won't be the case.
3627
3628 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3629 be necessary for call dummies on a non-executable stack on
3630 SPARC. */
3631
3632 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3633 ecs->random_signal
3634 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3635 || stopped_by_watchpoint
3636 || ecs->event_thread->trap_expected
3637 || (ecs->event_thread->step_range_end
3638 && ecs->event_thread->step_resume_breakpoint == NULL));
3639 else
3640 {
3641 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3642 if (!ecs->random_signal)
3643 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3644 }
3645 }
3646
3647 /* When we reach this point, we've pretty much decided
3648 that the reason for stopping must've been a random
3649 (unexpected) signal. */
3650
3651 else
3652 ecs->random_signal = 1;
3653
3654 process_event_stop_test:
3655
3656 /* Re-fetch current thread's frame in case we did a
3657 "goto process_event_stop_test" above. */
3658 frame = get_current_frame ();
3659 gdbarch = get_frame_arch (frame);
3660
3661 /* For the program's own signals, act according to
3662 the signal handling tables. */
3663
3664 if (ecs->random_signal)
3665 {
3666 /* Signal not for debugging purposes. */
3667 int printed = 0;
3668
3669 if (debug_infrun)
3670 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3671 ecs->event_thread->stop_signal);
3672
3673 stopped_by_random_signal = 1;
3674
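/* Print the signal if the user's "handle" settings ask for it. */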
3675 if (signal_print[ecs->event_thread->stop_signal])
3676 {
3677 printed = 1;
3678 target_terminal_ours_for_output ();
3679 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3680 }
3681 /* Always stop on signals if we're either just gaining control
3682 of the program, or the user explicitly requested this thread
3683 to remain stopped. */
3684 if (stop_soon != NO_STOP_QUIETLY
3685 || ecs->event_thread->stop_requested
3686 || signal_stop_state (ecs->event_thread->stop_signal))
3687 {
3688 stop_stepping (ecs);
3689 return;
3690 }
3691 /* If not going to stop, give terminal back
3692 if we took it away. */
3693 else if (printed)
3694 target_terminal_inferior ();
3695
3696 /* Clear the signal if it should not be passed. */
3697 if (signal_program[ecs->event_thread->stop_signal] == 0)
3698 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3699
3700 if (ecs->event_thread->prev_pc == stop_pc
3701 && ecs->event_thread->trap_expected
3702 && ecs->event_thread->step_resume_breakpoint == NULL)
3703 {
3704 /* We were just starting a new sequence, attempting to
3705 single-step off of a breakpoint and expecting a SIGTRAP.
3706 Instead this signal arrives. This signal will take us out
3707 of the stepping range so GDB needs to remember to, when
3708 the signal handler returns, resume stepping off that
3709 breakpoint. */
3710 /* To simplify things, "continue" is forced to use the same
3711 code paths as single-step - set a breakpoint at the
3712 signal return address and then, once hit, step off that
3713 breakpoint. */
3714 if (debug_infrun)
3715 fprintf_unfiltered (gdb_stdlog,
3716 "infrun: signal arrived while stepping over "
3717 "breakpoint\n");
3718
3719 insert_step_resume_breakpoint_at_frame (frame);
3720 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3721 keep_going (ecs);
3722 return;
3723 }
3724
3725 if (ecs->event_thread->step_range_end != 0
3726 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3727 && (ecs->event_thread->step_range_start <= stop_pc
3728 && stop_pc < ecs->event_thread->step_range_end)
3729 && frame_id_eq (get_stack_frame_id (frame),
3730 ecs->event_thread->step_stack_frame_id)
3731 && ecs->event_thread->step_resume_breakpoint == NULL)
3732 {
3733 /* The inferior is about to take a signal that will take it
3734 out of the single step range. Set a breakpoint at the
3735 current PC (which is presumably where the signal handler
3736 will eventually return) and then allow the inferior to
3737 run free.
3738
3739 Note that this is only needed for a signal delivered
3740 while in the single-step range. Nested signals aren't a
3741 problem as they eventually all return. */
3742 if (debug_infrun)
3743 fprintf_unfiltered (gdb_stdlog,
3744 "infrun: signal may take us out of "
3745 "single-step range\n");
3746
3747 insert_step_resume_breakpoint_at_frame (frame);
3748 keep_going (ecs);
3749 return;
3750 }
3751
3752 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3753 when either there's a nested signal, or when there's a
3754 pending signal enabled just as the signal handler returns
3755 (leaving the inferior at the step-resume-breakpoint without
3756 actually executing it). Either way continue until the
3757 breakpoint is really hit. */
3758 keep_going (ecs);
3759 return;
3760 }
3761
3762 /* Handle cases caused by hitting a breakpoint. */
3763 {
3764 CORE_ADDR jmp_buf_pc;
3765 struct bpstat_what what;
3766
3767 what = bpstat_what (ecs->event_thread->stop_bpstat);
3768
3769 if (what.call_dummy)
3770 {
3771 stop_stack_dummy = 1;
3772 }
3773
3774 switch (what.main_action)
3775 {
3776 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3777 /* If we hit the breakpoint at longjmp while stepping, we
3778 install a momentary breakpoint at the target of the
3779 jmp_buf. */
3780
3781 if (debug_infrun)
3782 fprintf_unfiltered (gdb_stdlog,
3783 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3784
3785 ecs->event_thread->stepping_over_breakpoint = 1;
3786
3787 if (!gdbarch_get_longjmp_target_p (gdbarch)
3788 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3789 {
3790 if (debug_infrun)
3791 fprintf_unfiltered (gdb_stdlog, "\
3792 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3793 keep_going (ecs);
3794 return;
3795 }
3796
3797 /* We're going to replace the current step-resume breakpoint
3798 with a longjmp-resume breakpoint. */
3799 delete_step_resume_breakpoint (ecs->event_thread);
3800
3801 /* Insert a breakpoint at resume address. */
3802 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3803
3804 keep_going (ecs);
3805 return;
3806
3807 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3808 if (debug_infrun)
3809 fprintf_unfiltered (gdb_stdlog,
3810 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3811
3812 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3813 delete_step_resume_breakpoint (ecs->event_thread);
3814
3815 ecs->event_thread->stop_step = 1;
3816 print_stop_reason (END_STEPPING_RANGE, 0);
3817 stop_stepping (ecs);
3818 return;
3819
3820 case BPSTAT_WHAT_SINGLE:
3821 if (debug_infrun)
3822 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3823 ecs->event_thread->stepping_over_breakpoint = 1;
3824 /* Still need to check other stuff, at least the case
3825 where we are stepping and step out of the right range. */
3826 break;
3827
3828 case BPSTAT_WHAT_STOP_NOISY:
3829 if (debug_infrun)
3830 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3831 stop_print_frame = 1;
3832
3833 /* We are about to nuke the step_resume_breakpoint via the
3834 cleanup chain, so no need to worry about it here. */
3835
3836 stop_stepping (ecs);
3837 return;
3838
3839 case BPSTAT_WHAT_STOP_SILENT:
3840 if (debug_infrun)
3841 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3842 stop_print_frame = 0;
3843
3844 /* We are about to nuke the step_resume_breakpoint via the
3845 cleanup chain, so no need to worry about it here. */
3846
3847 stop_stepping (ecs);
3848 return;
3849
3850 case BPSTAT_WHAT_STEP_RESUME:
3851 if (debug_infrun)
3852 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3853
3854 delete_step_resume_breakpoint (ecs->event_thread);
3855 if (ecs->event_thread->step_after_step_resume_breakpoint)
3856 {
3857 /* Back when the step-resume breakpoint was inserted, we
3858 were trying to single-step off a breakpoint. Go back
3859 to doing that. */
3860 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3861 ecs->event_thread->stepping_over_breakpoint = 1;
3862 keep_going (ecs);
3863 return;
3864 }
3865 if (stop_pc == ecs->stop_func_start
3866 && execution_direction == EXEC_REVERSE)
3867 {
3868 /* We are stepping over a function call in reverse, and
3869 just hit the step-resume breakpoint at the start
3870 address of the function. Go back to single-stepping,
3871 which should take us back to the function call. */
3872 ecs->event_thread->stepping_over_breakpoint = 1;
3873 keep_going (ecs);
3874 return;
3875 }
3876 break;
3877
3878 case BPSTAT_WHAT_CHECK_SHLIBS:
3879 {
3880 if (debug_infrun)
3881 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3882
3883 /* Check for any newly added shared libraries if we're
3884 supposed to be adding them automatically. Switch
3885 terminal for any messages produced by
3886 breakpoint_re_set. */
3887 target_terminal_ours_for_output ();
3888 /* NOTE: cagney/2003-11-25: Make certain that the target
3889 stack's section table is kept up-to-date. Architectures,
3890 (e.g., PPC64), use the section table to perform
3891 operations such as address => section name and hence
3892 require the table to contain all sections (including
3893 those found in shared libraries). */
3894 #ifdef SOLIB_ADD
3895 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3896 #else
3897 solib_add (NULL, 0, &current_target, auto_solib_add);
3898 #endif
3899 target_terminal_inferior ();
3900
3901 /* If requested, stop when the dynamic linker notifies
3902 gdb of events. This allows the user to get control
3903 and place breakpoints in initializer routines for
3904 dynamically loaded objects (among other things). */
3905 if (stop_on_solib_events || stop_stack_dummy)
3906 {
3907 stop_stepping (ecs);
3908 return;
3909 }
3910 else
3911 {
3912 /* We want to step over this breakpoint, then keep going. */
3913 ecs->event_thread->stepping_over_breakpoint = 1;
3914 break;
3915 }
3916 }
3917 break;
3918
3919 case BPSTAT_WHAT_CHECK_JIT:
3920 if (debug_infrun)
3921 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
3922
3923 /* Switch terminal for any messages produced by breakpoint_re_set. */
3924 target_terminal_ours_for_output ();
3925
3926 jit_event_handler (gdbarch);
3927
3928 target_terminal_inferior ();
3929
3930 /* We want to step over this breakpoint, then keep going. */
3931 ecs->event_thread->stepping_over_breakpoint = 1;
3932
3933 break;
3934
3935 case BPSTAT_WHAT_LAST:
3936 /* Not a real code, but listed here to shut up gcc -Wall. */
3937
3938 case BPSTAT_WHAT_KEEP_CHECKING:
3939 break;
3940 }
3941 }
3942
3943 /* We come here if we hit a breakpoint but should not
3944 stop for it. Possibly we also were stepping
3945 and should stop for that. So fall through and
3946 test for stepping. But, if not stepping,
3947 do not stop. */
3948
3949 /* In all-stop mode, if we're currently stepping but have stopped in
3950 some other thread, we need to switch back to the stepped thread. */
3951 if (!non_stop)
3952 {
3953 struct thread_info *tp;
3954 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3955 ecs->event_thread);
3956 if (tp)
3957 {
3958 /* However, if the current thread is blocked on some internal
3959 breakpoint, and we simply need to step over that breakpoint
3960 to get it going again, do that first. */
3961 if ((ecs->event_thread->trap_expected
3962 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3963 || ecs->event_thread->stepping_over_breakpoint)
3964 {
3965 keep_going (ecs);
3966 return;
3967 }
3968
3969 /* If the stepping thread exited, then don't try to switch
3970 back and resume it, which could fail in several different
3971 ways depending on the target. Instead, just keep going.
3972
3973 We can find a stepping dead thread in the thread list in
3974 two cases:
3975
3976 - The target supports thread exit events, and when the
3977 target tries to delete the thread from the thread list,
3978 inferior_ptid pointed at the exiting thread. In such
3979 case, calling delete_thread does not really remove the
3980 thread from the list; instead, the thread is left listed,
3981 with 'exited' state.
3982
3983 - The target's debug interface does not support thread
3984 exit events, and so we have no idea whatsoever if the
3985 previously stepping thread is still alive. For that
3986 reason, we need to synchronously query the target
3987 now. */
3988 if (is_exited (tp->ptid)
3989 || !target_thread_alive (tp->ptid))
3990 {
3991 if (debug_infrun)
3992 fprintf_unfiltered (gdb_stdlog, "\
3993 infrun: not switching back to stepped thread, it has vanished\n");
3994
3995 delete_thread (tp->ptid);
3996 keep_going (ecs);
3997 return;
3998 }
3999
4000 /* Otherwise, we no longer expect a trap in the current thread.
4001 Clear the trap_expected flag before switching back -- this is
4002 what keep_going would do as well, if we called it. */
4003 ecs->event_thread->trap_expected = 0;
4004
4005 if (debug_infrun)
4006 fprintf_unfiltered (gdb_stdlog,
4007 "infrun: switching back to stepped thread\n");
4008
4009 ecs->event_thread = tp;
4010 ecs->ptid = tp->ptid;
4011 context_switch (ecs->ptid);
4012 keep_going (ecs);
4013 return;
4014 }
4015 }
4016
4017 /* Are we stepping to get the inferior out of the dynamic linker's
4018 hook (and possibly the dld itself) after catching a shlib
4019 event? */
4020 if (ecs->event_thread->stepping_through_solib_after_catch)
4021 {
4022 #if defined(SOLIB_ADD)
4023 /* Have we reached our destination? If not, keep going. */
4024 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4025 {
4026 if (debug_infrun)
4027 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4028 ecs->event_thread->stepping_over_breakpoint = 1;
4029 keep_going (ecs);
4030 return;
4031 }
4032 #endif
4033 if (debug_infrun)
4034 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4035 /* Else, stop and report the catchpoint(s) whose triggering
4036 caused us to begin stepping. */
4037 ecs->event_thread->stepping_through_solib_after_catch = 0;
4038 bpstat_clear (&ecs->event_thread->stop_bpstat);
4039 ecs->event_thread->stop_bpstat
4040 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4041 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4042 stop_print_frame = 1;
4043 stop_stepping (ecs);
4044 return;
4045 }
4046
4047 if (ecs->event_thread->step_resume_breakpoint)
4048 {
4049 if (debug_infrun)
4050 fprintf_unfiltered (gdb_stdlog,
4051 "infrun: step-resume breakpoint is inserted\n");
4052
4053 /* Having a step-resume breakpoint overrides anything
4054 else having to do with stepping commands until
4055 that breakpoint is reached. */
4056 keep_going (ecs);
4057 return;
4058 }
4059
4060 if (ecs->event_thread->step_range_end == 0)
4061 {
4062 if (debug_infrun)
4063 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4064 /* Likewise if we aren't even stepping. */
4065 keep_going (ecs);
4066 return;
4067 }
4068
4069 /* Re-fetch current thread's frame in case the code above caused
4070 the frame cache to be re-initialized, making our FRAME variable
4071 a dangling pointer. */
4072 frame = get_current_frame ();
4073
4074 /* If stepping through a line, keep going if still within it.
4075
4076 Note that step_range_end is the address of the first instruction
4077 beyond the step range, and NOT the address of the last instruction
4078 within it!
4079
4080 Note also that during reverse execution, we may be stepping
4081 through a function epilogue and therefore must detect when
4082 the current-frame changes in the middle of a line. */
4083
4084 if (stop_pc >= ecs->event_thread->step_range_start
4085 && stop_pc < ecs->event_thread->step_range_end
4086 && (execution_direction != EXEC_REVERSE
4087 || frame_id_eq (get_frame_id (frame),
4088 ecs->event_thread->step_frame_id)))
4089 {
4090 if (debug_infrun)
4091 fprintf_unfiltered
4092 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4093 paddress (gdbarch, ecs->event_thread->step_range_start),
4094 paddress (gdbarch, ecs->event_thread->step_range_end));
4095
4096 /* When stepping backward, stop at beginning of line range
4097 (unless it's the function entry point, in which case
4098 keep going back to the call point). */
4099 if (stop_pc == ecs->event_thread->step_range_start
4100 && stop_pc != ecs->stop_func_start
4101 && execution_direction == EXEC_REVERSE)
4102 {
4103 ecs->event_thread->stop_step = 1;
4104 print_stop_reason (END_STEPPING_RANGE, 0);
4105 stop_stepping (ecs);
4106 }
4107 else
4108 keep_going (ecs);
4109
4110 return;
4111 }
4112
4113 /* We stepped out of the stepping range. */
4114
4115 /* If we are stepping at the source level and entered the runtime
4116 loader dynamic symbol resolution code...
4117
4118 EXEC_FORWARD: we keep on single stepping until we exit the run
4119 time loader code and reach the callee's address.
4120
4121 EXEC_REVERSE: we've already executed the callee (backward), and
4122 the runtime loader code is handled just like any other
4123 undebuggable function call. Now we need only keep stepping
4124 backward through the trampoline code, and that's handled further
4125 down, so there is nothing for us to do here. */
4126
4127 if (execution_direction != EXEC_REVERSE
4128 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4129 && in_solib_dynsym_resolve_code (stop_pc))
4130 {
4131 CORE_ADDR pc_after_resolver =
4132 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4133
4134 if (debug_infrun)
4135 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4136
4137 if (pc_after_resolver)
4138 {
4139 /* Set up a step-resume breakpoint at the address
4140 indicated by SKIP_SOLIB_RESOLVER. */
4141 struct symtab_and_line sr_sal;
4142 init_sal (&sr_sal);
4143 sr_sal.pc = pc_after_resolver;
4144 sr_sal.pspace = get_frame_program_space (frame);
4145
4146 insert_step_resume_breakpoint_at_sal (gdbarch,
4147 sr_sal, null_frame_id);
4148 }
4149
4150 keep_going (ecs);
4151 return;
4152 }
4153
4154 if (ecs->event_thread->step_range_end != 1
4155 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4156 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4157 && get_frame_type (frame) == SIGTRAMP_FRAME)
4158 {
4159 if (debug_infrun)
4160 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4161 /* The inferior, while doing a "step" or "next", has ended up in
4162 a signal trampoline (either by a signal being delivered or by
4163 the signal handler returning). Just single-step until the
4164 inferior leaves the trampoline (either by calling the handler
4165 or returning). */
4166 keep_going (ecs);
4167 return;
4168 }
4169
4170 /* Check for subroutine calls. The check for the current frame
4171 equalling the step ID is not necessary - the check of the
4172 previous frame's ID is sufficient - but it is a common case and
4173 cheaper than checking the previous frame's ID.
4174
4175 NOTE: frame_id_eq will never report two invalid frame IDs as
4176 being equal, so to get into this block, both the current and
4177 previous frame must have valid frame IDs. */
4178 /* The outer_frame_id check is a heuristic to detect stepping
4179 through startup code. If we step over an instruction which
4180 sets the stack pointer from an invalid value to a valid value,
4181 we may detect that as a subroutine call from the mythical
4182 "outermost" function. This could be fixed by marking
4183 outermost frames as !stack_p,code_p,special_p. Then the
4184 initial outermost frame, before sp was valid, would
4185 have code_addr == &_start. See the comment in frame_id_eq
4186 for more. */
4187 if (!frame_id_eq (get_stack_frame_id (frame),
4188 ecs->event_thread->step_stack_frame_id)
4189 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4190 ecs->event_thread->step_stack_frame_id)
4191 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4192 outer_frame_id)
4193 || step_start_function != find_pc_function (stop_pc))))
4194 {
4195 CORE_ADDR real_stop_pc;
4196
4197 if (debug_infrun)
4198 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4199
4200 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4201 || ((ecs->event_thread->step_range_end == 1)
4202 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4203 ecs->stop_func_start)))
4204 {
4205 /* I presume that step_over_calls is only 0 when we're
4206 supposed to be stepping at the assembly language level
4207 ("stepi"). Just stop. */
4208 /* Also, maybe we just did a "nexti" inside a prologue, so we
4209 thought it was a subroutine call but it was not. Stop as
4210 well. FENN */
4211 /* And this works the same backward as forward. MVS */
4212 ecs->event_thread->stop_step = 1;
4213 print_stop_reason (END_STEPPING_RANGE, 0);
4214 stop_stepping (ecs);
4215 return;
4216 }
4217
4218 /* Reverse stepping through solib trampolines. */
4219
4220 if (execution_direction == EXEC_REVERSE
4221 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4222 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4223 || (ecs->stop_func_start == 0
4224 && in_solib_dynsym_resolve_code (stop_pc))))
4225 {
4226 /* Any solib trampoline code can be handled in reverse
4227 by simply continuing to single-step. We have already
4228 executed the solib function (backwards), and a few
4229 steps will take us back through the trampoline to the
4230 caller. */
4231 keep_going (ecs);
4232 return;
4233 }
4234
4235 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4236 {
4237 /* We're doing a "next".
4238
4239 Normal (forward) execution: set a breakpoint at the
4240 callee's return address (the address at which the caller
4241 will resume).
4242
4243 Reverse (backward) execution. set the step-resume
4244 breakpoint at the start of the function that we just
4245 stepped into (backwards), and continue to there. When we
4246 get there, we'll need to single-step back to the caller. */
4247
4248 if (execution_direction == EXEC_REVERSE)
4249 {
4250 struct symtab_and_line sr_sal;
4251
4252 /* Normal function call return (static or dynamic). */
4253 init_sal (&sr_sal);
4254 sr_sal.pc = ecs->stop_func_start;
4255 sr_sal.pspace = get_frame_program_space (frame);
4256 insert_step_resume_breakpoint_at_sal (gdbarch,
4257 sr_sal, null_frame_id);
4258 }
4259 else
4260 insert_step_resume_breakpoint_at_caller (frame);
4261
4262 keep_going (ecs);
4263 return;
4264 }
4265
4266 /* If we are in a function call trampoline (a stub between the
4267 calling routine and the real function), locate the real
4268 function. That's what tells us (a) whether we want to step
4269 into it at all, and (b) what prologue we want to run to the
4270 end of, if we do step into it. */
4271 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4272 if (real_stop_pc == 0)
4273 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4274 if (real_stop_pc != 0)
4275 ecs->stop_func_start = real_stop_pc;
4276
4277 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4278 {
4279 struct symtab_and_line sr_sal;
4280 init_sal (&sr_sal);
4281 sr_sal.pc = ecs->stop_func_start;
4282 sr_sal.pspace = get_frame_program_space (frame);
4283
4284 insert_step_resume_breakpoint_at_sal (gdbarch,
4285 sr_sal, null_frame_id);
4286 keep_going (ecs);
4287 return;
4288 }
4289
4290 /* If we have line number information for the function we are
4291 thinking of stepping into, step into it.
4292
4293 If there are several symtabs at that PC (e.g. with include
4294 files), we just want to know whether *any* of them have line
4295 numbers. find_pc_line handles this. */
4296 {
4297 struct symtab_and_line tmp_sal;
4298
4299 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4300 tmp_sal.pspace = get_frame_program_space (frame);
4301 if (tmp_sal.line != 0)
4302 {
4303 if (execution_direction == EXEC_REVERSE)
4304 handle_step_into_function_backward (gdbarch, ecs);
4305 else
4306 handle_step_into_function (gdbarch, ecs);
4307 return;
4308 }
4309 }
4310
4311 /* If we have no line number and the step-stop-if-no-debug is
4312 set, we stop the step so that the user has a chance to switch
4313 to assembly mode. */
4314 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4315 && step_stop_if_no_debug)
4316 {
4317 ecs->event_thread->stop_step = 1;
4318 print_stop_reason (END_STEPPING_RANGE, 0);
4319 stop_stepping (ecs);
4320 return;
4321 }
4322
4323 if (execution_direction == EXEC_REVERSE)
4324 {
4325 /* Set a breakpoint at callee's start address.
4326 From there we can step once and be back in the caller. */
4327 struct symtab_and_line sr_sal;
4328 init_sal (&sr_sal);
4329 sr_sal.pc = ecs->stop_func_start;
4330 sr_sal.pspace = get_frame_program_space (frame);
4331 insert_step_resume_breakpoint_at_sal (gdbarch,
4332 sr_sal, null_frame_id);
4333 }
4334 else
4335 /* Set a breakpoint at callee's return address (the address
4336 at which the caller will resume). */
4337 insert_step_resume_breakpoint_at_caller (frame);
4338
4339 keep_going (ecs);
4340 return;
4341 }
4342
4343 /* Reverse stepping through solib trampolines. */
4344
4345 if (execution_direction == EXEC_REVERSE
4346 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4347 {
4348 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4349 || (ecs->stop_func_start == 0
4350 && in_solib_dynsym_resolve_code (stop_pc)))
4351 {
4352 /* Any solib trampoline code can be handled in reverse
4353 by simply continuing to single-step. We have already
4354 executed the solib function (backwards), and a few
4355 steps will take us back through the trampoline to the
4356 caller. */
4357 keep_going (ecs);
4358 return;
4359 }
4360 else if (in_solib_dynsym_resolve_code (stop_pc))
4361 {
4362 /* Stepped backward into the solib dynsym resolver.
4363 Set a breakpoint at its start and continue, then
4364 one more step will take us out. */
4365 struct symtab_and_line sr_sal;
4366 init_sal (&sr_sal);
4367 sr_sal.pc = ecs->stop_func_start;
4368 sr_sal.pspace = get_frame_program_space (frame);
4369 insert_step_resume_breakpoint_at_sal (gdbarch,
4370 sr_sal, null_frame_id);
4371 keep_going (ecs);
4372 return;
4373 }
4374 }
4375
4376 /* If we're in the return path from a shared library trampoline,
4377 we want to proceed through the trampoline when stepping. */
4378 if (gdbarch_in_solib_return_trampoline (gdbarch,
4379 stop_pc, ecs->stop_func_name))
4380 {
4381 /* Determine where this trampoline returns. */
4382 CORE_ADDR real_stop_pc;
4383 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4384
4385 if (debug_infrun)
4386 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4387
4388 /* Only proceed through if we know where it's going. */
4389 if (real_stop_pc)
4390 {
4391 /* And put the step-breakpoint there and go until there. */
4392 struct symtab_and_line sr_sal;
4393
4394 init_sal (&sr_sal); /* initialize to zeroes */
4395 sr_sal.pc = real_stop_pc;
4396 sr_sal.section = find_pc_overlay (sr_sal.pc);
4397 sr_sal.pspace = get_frame_program_space (frame);
4398
4399 /* Do not specify what the fp should be when we stop since
4400 on some machines the prologue is where the new fp value
4401 is established. */
4402 insert_step_resume_breakpoint_at_sal (gdbarch,
4403 sr_sal, null_frame_id);
4404
4405 /* Restart without fiddling with the step ranges or
4406 other state. */
4407 keep_going (ecs);
4408 return;
4409 }
4410 }
4411
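/* Look up the source line containing the PC we stopped at; the
   stepping tests below compare it against the line we were
   stepping through. */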
4412 stop_pc_sal = find_pc_line (stop_pc, 0);
4413
4414 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4415 the trampoline processing logic; however, there are some trampolines
4416 that have no names, so we should do trampoline handling first. */
4417 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4418 && ecs->stop_func_name == NULL
4419 && stop_pc_sal.line == 0)
4420 {
4421 if (debug_infrun)
4422 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4423
4424 /* The inferior just stepped into, or returned to, an
4425 undebuggable function (where there is no debugging information
4426 and no line number corresponding to the address where the
4427 inferior stopped). Since we want to skip this kind of code,
4428 we keep going until the inferior returns from this
4429 function - unless the user has asked us not to (via
4430 set step-mode) or we no longer know how to get back
4431 to the call site. */
4432 if (step_stop_if_no_debug
4433 || !frame_id_p (frame_unwind_caller_id (frame)))
4434 {
4435 /* If we have no line number and the step-stop-if-no-debug
4436 is set, we stop the step so that the user has a chance to
4437 switch to assembly mode. */
4438 ecs->event_thread->stop_step = 1;
4439 print_stop_reason (END_STEPPING_RANGE, 0);
4440 stop_stepping (ecs);
4441 return;
4442 }
4443 else
4444 {
4445 /* Set a breakpoint at callee's return address (the address
4446 at which the caller will resume). */
4447 insert_step_resume_breakpoint_at_caller (frame);
4448 keep_going (ecs);
4449 return;
4450 }
4451 }
4452
4453 if (ecs->event_thread->step_range_end == 1)
4454 {
4455 /* It is stepi or nexti. We always want to stop stepping after
4456 one instruction. */
4457 if (debug_infrun)
4458 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4459 ecs->event_thread->stop_step = 1;
4460 print_stop_reason (END_STEPPING_RANGE, 0);
4461 stop_stepping (ecs);
4462 return;
4463 }
4464
4465 if (stop_pc_sal.line == 0)
4466 {
4467 /* We have no line number information. That means to stop
4468 stepping (does this always happen right after one instruction,
4469 when we do "s" in a function with no line numbers,
4470 or can this happen as a result of a return or longjmp?). */
4471 if (debug_infrun)
4472 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4473 ecs->event_thread->stop_step = 1;
4474 print_stop_reason (END_STEPPING_RANGE, 0);
4475 stop_stepping (ecs);
4476 return;
4477 }
4478
4479 /* Look for "calls" to inlined functions, part one. If the inline
4480 frame machinery detected some skipped call sites, we have entered
4481 a new inline function. */
4482
4483 if (frame_id_eq (get_frame_id (get_current_frame ()),
4484 ecs->event_thread->step_frame_id)
4485 && inline_skipped_frames (ecs->ptid))
4486 {
4487 struct symtab_and_line call_sal;
4488
4489 if (debug_infrun)
4490 fprintf_unfiltered (gdb_stdlog,
4491 "infrun: stepped into inlined function\n");
4492
4493 find_frame_sal (get_current_frame (), &call_sal);
4494
4495 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4496 {
4497 /* For "step", we're going to stop. But if the call site
4498 for this inlined function is on the same source line as
4499 we were previously stepping, go down into the function
4500 first. Otherwise stop at the call site. */
4501
4502 if (call_sal.line == ecs->event_thread->current_line
4503 && call_sal.symtab == ecs->event_thread->current_symtab)
4504 step_into_inline_frame (ecs->ptid);
4505
4506 ecs->event_thread->stop_step = 1;
4507 print_stop_reason (END_STEPPING_RANGE, 0);
4508 stop_stepping (ecs);
4509 return;
4510 }
4511 else
4512 {
4513 /* For "next", we should stop at the call site if it is on a
4514 different source line. Otherwise continue through the
4515 inlined function. */
4516 if (call_sal.line == ecs->event_thread->current_line
4517 && call_sal.symtab == ecs->event_thread->current_symtab)
4518 keep_going (ecs);
4519 else
4520 {
4521 ecs->event_thread->stop_step = 1;
4522 print_stop_reason (END_STEPPING_RANGE, 0);
4523 stop_stepping (ecs);
4524 }
4525 return;
4526 }
4527 }
4528
4529 /* Look for "calls" to inlined functions, part two. If we are still
4530 in the same real function we were stepping through, but we have
4531 to go further up to find the exact frame ID, we are stepping
4532 through a more inlined call beyond its call site. */
4533
4534 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4535 && !frame_id_eq (get_frame_id (get_current_frame ()),
4536 ecs->event_thread->step_frame_id)
4537 && stepped_in_from (get_current_frame (),
4538 ecs->event_thread->step_frame_id))
4539 {
4540 if (debug_infrun)
4541 fprintf_unfiltered (gdb_stdlog,
4542 "infrun: stepping through inlined function\n");
4543
4544 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4545 keep_going (ecs);
4546 else
4547 {
4548 ecs->event_thread->stop_step = 1;
4549 print_stop_reason (END_STEPPING_RANGE, 0);
4550 stop_stepping (ecs);
4551 }
4552 return;
4553 }
4554
4555 if ((stop_pc == stop_pc_sal.pc)
4556 && (ecs->event_thread->current_line != stop_pc_sal.line
4557 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4558 {
4559 /* We are at the start of a different line. So stop. Note that
4560 we don't stop if we step into the middle of a different line.
4561 That is said to make things like for (;;) statements work
4562 better. */
4563 if (debug_infrun)
4564 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4565 ecs->event_thread->stop_step = 1;
4566 print_stop_reason (END_STEPPING_RANGE, 0);
4567 stop_stepping (ecs);
4568 return;
4569 }
4570
4571 /* We aren't done stepping.
4572
4573 Optimize by setting the stepping range to the line.
4574 (We might not be in the original line, but if we entered a
4575 new line in mid-statement, we continue stepping. This makes
4576 things like for(;;) statements work better.) */
4577
4578 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4579 ecs->event_thread->step_range_end = stop_pc_sal.end;
4580 set_step_info (frame, stop_pc_sal);
4581
4582 if (debug_infrun)
4583 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4584 keep_going (ecs);
4585 }
4586
4587 /* Is thread TP in the middle of single-stepping? */
4588
4589 static int
4590 currently_stepping (struct thread_info *tp)
4591 {
4592 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4593 || tp->trap_expected
4594 || tp->stepping_through_solib_after_catch
4595 || bpstat_should_step ());
4596 }
4597
4598 /* Returns true if any thread *but* the one passed in "data" is in the
4599 middle of stepping or of handling a "next". */
4600
4601 static int
4602 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4603 {
4604 if (tp == data)
4605 return 0;
4606
4607 return (tp->step_range_end
4608 || tp->trap_expected
4609 || tp->stepping_through_solib_after_catch);
4610 }
4611
4612 /* Inferior has stepped into a subroutine call with source code that
4613 we should not step over. Do step to the first line of code in
4614 it. */
4615
4616 static void
4617 handle_step_into_function (struct gdbarch *gdbarch,
4618 struct execution_control_state *ecs)
4619 {
4620 struct symtab *s;
4621 struct symtab_and_line stop_func_sal, sr_sal;
4622
4623 s = find_pc_symtab (stop_pc);
4624 if (s && s->language != language_asm)
4625 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4626 ecs->stop_func_start);
4627
4628 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4629 /* Use the step_resume_break to step until the end of the prologue,
4630 even if that involves jumps (as it seems to on the vax under
4631 4.2). */
4632 /* If the prologue ends in the middle of a source line, continue to
4633 the end of that source line (if it is still within the function).
4634 Otherwise, just go to end of prologue. */
4635 if (stop_func_sal.end
4636 && stop_func_sal.pc != ecs->stop_func_start
4637 && stop_func_sal.end < ecs->stop_func_end)
4638 ecs->stop_func_start = stop_func_sal.end;
4639
4640 /* Architectures which require breakpoint adjustment might not be able
4641 to place a breakpoint at the computed address. If so, the test
4642 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4643 ecs->stop_func_start to an address at which a breakpoint may be
4644 legitimately placed.
4645
4646 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4647 made, GDB will enter an infinite loop when stepping through
4648 optimized code consisting of VLIW instructions which contain
4649 subinstructions corresponding to different source lines. On
4650 FR-V, it's not permitted to place a breakpoint on any but the
4651 first subinstruction of a VLIW instruction. When a breakpoint is
4652 set, GDB will adjust the breakpoint address to the beginning of
4653 the VLIW instruction. Thus, we need to make the corresponding
4654 adjustment here when computing the stop address. */
4655
4656 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4657 {
4658 ecs->stop_func_start
4659 = gdbarch_adjust_breakpoint_address (gdbarch,
4660 ecs->stop_func_start);
4661 }
4662
4663 if (ecs->stop_func_start == stop_pc)
4664 {
4665 /* We are already there: stop now. */
4666 ecs->event_thread->stop_step = 1;
4667 print_stop_reason (END_STEPPING_RANGE, 0);
4668 stop_stepping (ecs);
4669 return;
4670 }
4671 else
4672 {
4673 /* Put the step-breakpoint there and go until there. */
4674 init_sal (&sr_sal); /* initialize to zeroes */
4675 sr_sal.pc = ecs->stop_func_start;
4676 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4677 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4678
4679 /* Do not specify what the fp should be when we stop since on
4680 some machines the prologue is where the new fp value is
4681 established. */
4682 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4683
4684 /* And make sure stepping stops right away then. */
4685 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4686 }
4687 keep_going (ecs);
4688 }
4689
4690 /* Inferior has stepped backward into a subroutine call with source
4691 code that we should not step over. Do step to the beginning of the
4692 last line of code in it. */
4693
4694 static void
4695 handle_step_into_function_backward (struct gdbarch *gdbarch,
4696 struct execution_control_state *ecs)
4697 {
4698 struct symtab *s;
4699 struct symtab_and_line stop_func_sal, sr_sal;
4700
4701 s = find_pc_symtab (stop_pc);
4702 if (s && s->language != language_asm)
4703 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4704 ecs->stop_func_start);
4705
4706 stop_func_sal = find_pc_line (stop_pc, 0);
4707
4708 /* OK, we're just going to keep stepping here. */
4709 if (stop_func_sal.pc == stop_pc)
4710 {
4711 /* We're there already. Just stop stepping now. */
4712 ecs->event_thread->stop_step = 1;
4713 print_stop_reason (END_STEPPING_RANGE, 0);
4714 stop_stepping (ecs);
4715 }
4716 else
4717 {
4718 /* Else just reset the step range and keep going.
4719 No step-resume breakpoint, they don't work for
4720 epilogues, which can have multiple entry paths. */
4721 ecs->event_thread->step_range_start = stop_func_sal.pc;
4722 ecs->event_thread->step_range_end = stop_func_sal.end;
4723 keep_going (ecs);
4724 }
4725 return;
4726 }
4727
4728 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4729 This is used both to skip over functions and to skip over other code. */
4730
4731 static void
4732 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4733 struct symtab_and_line sr_sal,
4734 struct frame_id sr_id)
4735 {
4736 /* There should never be more than one step-resume or longjmp-resume
4737 breakpoint per thread, so we should never be setting a new
4738 step_resume_breakpoint when one is already active. */
4739 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4740
4741 if (debug_infrun)
4742 fprintf_unfiltered (gdb_stdlog,
4743 "infrun: inserting step-resume breakpoint at %s\n",
4744 paddress (gdbarch, sr_sal.pc));
4745
4746 inferior_thread ()->step_resume_breakpoint
4747 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4748 }
4749
4750 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4751 to skip a potential signal handler.
4752
4753 This is called with the interrupted function's frame. The signal
4754 handler, when it returns, will resume the interrupted function at
4755 RETURN_FRAME.pc. */
4756
4757 static void
4758 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4759 {
4760 struct symtab_and_line sr_sal;
4761 struct gdbarch *gdbarch;
4762
4763 gdb_assert (return_frame != NULL);
4764 init_sal (&sr_sal); /* initialize to zeros */
4765
4766 gdbarch = get_frame_arch (return_frame);
4767 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4768 sr_sal.section = find_pc_overlay (sr_sal.pc);
4769 sr_sal.pspace = get_frame_program_space (return_frame);
4770
4771 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4772 get_stack_frame_id (return_frame));
4773 }
4774
4775 /* Similar to insert_step_resume_breakpoint_at_frame, except
4776 it sets a breakpoint at the previous frame's PC. This is used to
4777 skip a function after stepping into it (for "next" or if the called
4778 function has no debugging information).
4779
4780 The current function has almost always been reached by single
4781 stepping a call or return instruction. NEXT_FRAME belongs to the
4782 current function, and the breakpoint will be set at the caller's
4783 resume address.
4784
4785 This is a separate function rather than reusing
4786 insert_step_resume_breakpoint_at_frame in order to avoid
4787 get_prev_frame, which may stop prematurely (see the implementation
4788 of frame_unwind_caller_id for an example). */
4789
4790 static void
4791 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4792 {
4793 struct symtab_and_line sr_sal;
4794 struct gdbarch *gdbarch;
4795
4796 /* We shouldn't have gotten here if we don't know where the call site
4797 is. */
4798 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4799
4800 init_sal (&sr_sal); /* initialize to zeros */
4801
4802 gdbarch = frame_unwind_caller_arch (next_frame);
4803 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4804 frame_unwind_caller_pc (next_frame));
4805 sr_sal.section = find_pc_overlay (sr_sal.pc);
4806 sr_sal.pspace = frame_unwind_program_space (next_frame);
4807
4808 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4809 frame_unwind_caller_id (next_frame));
4810 }
4811
4812 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4813 new breakpoint at the target of a jmp_buf. The handling of
4814 longjmp-resume uses the same mechanisms used for handling
4815 "step-resume" breakpoints. */
4816
4817 static void
4818 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4819 {
4820 /* There should never be more than one step-resume or longjmp-resume
4821 breakpoint per thread, so we should never be setting a new
4822 longjmp_resume_breakpoint when one is already active. */
4823 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4824
4825 if (debug_infrun)
4826 fprintf_unfiltered (gdb_stdlog,
4827 "infrun: inserting longjmp-resume breakpoint at %s\n",
4828 paddress (gdbarch, pc));
4829
4830 inferior_thread ()->step_resume_breakpoint =
4831 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4832 }
4833
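#if 0 /* Documentation-only sketch (not compiled): paraphrases how the
         bp_longjmp case in handle_inferior_event uses the helper above.
         The gdbarch hooks are real, but the surrounding condition is
         simplified.  */
  {
    struct frame_info *frame = get_current_frame ();
    struct gdbarch *gdbarch = get_frame_arch (frame);
    CORE_ADDR jmp_buf_pc;

    /* We stopped at a longjmp breakpoint: ask the architecture where
       the jmp_buf will resume execution and plant a longjmp-resume
       breakpoint there, so a pending "step"/"next" does not get lost.  */
    if (gdbarch_get_longjmp_target_p (gdbarch)
        && gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
      insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
  }
#endif
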
4834 static void
4835 stop_stepping (struct execution_control_state *ecs)
4836 {
4837 if (debug_infrun)
4838 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4839
4840 /* Let callers know we don't want to wait for the inferior anymore. */
4841 ecs->wait_some_more = 0;
4842 }
4843
4844 /* This function handles various cases where we need to continue
4845 waiting for the inferior. */
4846 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4847
4848 static void
4849 keep_going (struct execution_control_state *ecs)
4850 {
4851 /* Make sure normal_stop is called if we get a QUIT handled before
4852 reaching resume. */
4853 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
4854
4855 /* Save the pc before execution, to compare with pc after stop. */
4856 ecs->event_thread->prev_pc
4857 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4858
4859 /* Since we did not return early above, we should keep running the
4860 inferior and not return to the debugger just yet. */
4861
4862 if (ecs->event_thread->trap_expected
4863 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4864 {
4865 /* We took a signal (which we are supposed to pass through to
4866 the inferior, else we'd not get here) and we haven't yet
4867 gotten our trap. Simply continue. */
4868
4869 discard_cleanups (old_cleanups);
4870 resume (currently_stepping (ecs->event_thread),
4871 ecs->event_thread->stop_signal);
4872 }
4873 else
4874 {
4875 /* Either the trap was not expected, but we are continuing
4876 anyway (the user asked that this signal be passed to the
4877 child)
4878 -- or --
4879 The signal was SIGTRAP, e.g. it was our signal, but we
4880 decided we should resume from it.
4881
4882 We're going to run this baby now!
4883
4884 Note that insert_breakpoints won't try to re-insert
4885 already inserted breakpoints. Therefore, we don't
4886 care if breakpoints were already inserted, or not. */
4887
4888 if (ecs->event_thread->stepping_over_breakpoint)
4889 {
4890 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4891 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4892 /* Since we can't do a displaced step, we have to remove
4893 the breakpoint while we step it. To keep things
4894 simple, we remove them all. */
4895 remove_breakpoints ();
4896 }
4897 else
4898 {
4899 struct gdb_exception e;
4900 /* Stop stepping when inserting breakpoints
4901 has failed. */
4902 TRY_CATCH (e, RETURN_MASK_ERROR)
4903 {
4904 insert_breakpoints ();
4905 }
4906 if (e.reason < 0)
4907 {
4908 exception_print (gdb_stderr, e);
4909 stop_stepping (ecs);
4910 return;
4911 }
4912 }
4913
4914 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4915
4916 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4917 specifies that such a signal should be delivered to the
4918 target program).
4919
4920 Typically, this would occur when a user is debugging a
4921 target monitor on a simulator: the target monitor sets a
4922 breakpoint; the simulator encounters this breakpoint and
4923 halts the simulation handing control to GDB; GDB, noting
4924 that the breakpoint isn't valid, returns control back to the
4925 simulator; the simulator then delivers the hardware
4926 equivalent of a SIGNAL_TRAP to the program being debugged. */
4927
4928 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4929 && !signal_program[ecs->event_thread->stop_signal])
4930 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4931
4932 discard_cleanups (old_cleanups);
4933 resume (currently_stepping (ecs->event_thread),
4934 ecs->event_thread->stop_signal);
4935 }
4936
4937 prepare_to_wait (ecs);
4938 }
4939
4940 /* This function normally comes after a resume, before
4941 handle_inferior_event exits. It takes care of any last bits of
4942 housekeeping, and sets the all-important wait_some_more flag. */
4943
4944 static void
4945 prepare_to_wait (struct execution_control_state *ecs)
4946 {
4947 if (debug_infrun)
4948 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4949
4950 /* This is the old end of the while loop. Let everybody know we
4951 want to wait for the inferior some more and get called again
4952 soon. */
4953 ecs->wait_some_more = 1;
4954 }
4955
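#if 0 /* Documentation-only sketch (not compiled) of the contract between
         keep_going, prepare_to_wait and their callers; field names
         follow this file's wait_for_inferior, but the loop is heavily
         simplified.  */
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  do
    {
      ecs->ptid = target_wait (minus_one_ptid, &ecs->ws, 0);
      handle_inferior_event (ecs);   /* ends in keep_going or stop_stepping */
    }
  while (ecs->wait_some_more);       /* 1 after prepare_to_wait,
                                        0 after stop_stepping */
#endif
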
4956 /* Print why the inferior has stopped. We always print something when
4957 the inferior exits, or receives a signal. The rest of the cases are
4958 dealt with later on in normal_stop() and print_it_typical(). Ideally
4959 there should be a call to this function from handle_inferior_event()
4960 each time stop_stepping() is called.  */
4961 static void
4962 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4963 {
4964 switch (stop_reason)
4965 {
4966 case END_STEPPING_RANGE:
4967 /* We are done with a step/next/si/ni command. */
4968 /* For now print nothing. */
4969 /* Print a message only if not in the middle of doing a "step n"
4970 operation for n > 1 */
4971 if (!inferior_thread ()->step_multi
4972 || !inferior_thread ()->stop_step)
4973 if (ui_out_is_mi_like_p (uiout))
4974 ui_out_field_string
4975 (uiout, "reason",
4976 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4977 break;
4978 case SIGNAL_EXITED:
4979 /* The inferior was terminated by a signal. */
4980 annotate_signalled ();
4981 if (ui_out_is_mi_like_p (uiout))
4982 ui_out_field_string
4983 (uiout, "reason",
4984 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4985 ui_out_text (uiout, "\nProgram terminated with signal ");
4986 annotate_signal_name ();
4987 ui_out_field_string (uiout, "signal-name",
4988 target_signal_to_name (stop_info));
4989 annotate_signal_name_end ();
4990 ui_out_text (uiout, ", ");
4991 annotate_signal_string ();
4992 ui_out_field_string (uiout, "signal-meaning",
4993 target_signal_to_string (stop_info));
4994 annotate_signal_string_end ();
4995 ui_out_text (uiout, ".\n");
4996 ui_out_text (uiout, "The program no longer exists.\n");
4997 break;
4998 case EXITED:
4999 /* The inferior program is finished. */
5000 annotate_exited (stop_info);
5001 if (stop_info)
5002 {
5003 if (ui_out_is_mi_like_p (uiout))
5004 ui_out_field_string (uiout, "reason",
5005 async_reason_lookup (EXEC_ASYNC_EXITED));
5006 ui_out_text (uiout, "\nProgram exited with code ");
5007 ui_out_field_fmt (uiout, "exit-code", "0%o",
5008 (unsigned int) stop_info);
5009 ui_out_text (uiout, ".\n");
5010 }
5011 else
5012 {
5013 if (ui_out_is_mi_like_p (uiout))
5014 ui_out_field_string
5015 (uiout, "reason",
5016 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5017 ui_out_text (uiout, "\nProgram exited normally.\n");
5018 }
5019 /* Support the --return-child-result option. */
5020 return_child_result_value = stop_info;
5021 break;
5022 case SIGNAL_RECEIVED:
5023 /* Signal received. The signal table tells us to print about
5024 it. */
5025 annotate_signal ();
5026
5027 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5028 {
5029 struct thread_info *t = inferior_thread ();
5030
5031 ui_out_text (uiout, "\n[");
5032 ui_out_field_string (uiout, "thread-name",
5033 target_pid_to_str (t->ptid));
5034 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5035 ui_out_text (uiout, " stopped");
5036 }
5037 else
5038 {
5039 ui_out_text (uiout, "\nProgram received signal ");
5040 annotate_signal_name ();
5041 if (ui_out_is_mi_like_p (uiout))
5042 ui_out_field_string
5043 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5044 ui_out_field_string (uiout, "signal-name",
5045 target_signal_to_name (stop_info));
5046 annotate_signal_name_end ();
5047 ui_out_text (uiout, ", ");
5048 annotate_signal_string ();
5049 ui_out_field_string (uiout, "signal-meaning",
5050 target_signal_to_string (stop_info));
5051 annotate_signal_string_end ();
5052 }
5053 ui_out_text (uiout, ".\n");
5054 break;
5055 case NO_HISTORY:
5056 /* Reverse execution: target ran out of history info. */
5057 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5058 break;
5059 default:
5060 internal_error (__FILE__, __LINE__,
5061 _("print_stop_reason: unrecognized enum value"));
5062 break;
5063 }
5064 }
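
/* For reference, roughly what the cases above produce on the CLI (MI
   frontends additionally get the "reason" field shown in the code; the
   exact strings depend on the target's signal tables):

     END_STEPPING_RANGE: (nothing, unless an MI "reason" is emitted)
     SIGNAL_EXITED:      Program terminated with signal SIGSEGV,
                         Segmentation fault.
                         The program no longer exists.
     EXITED:             Program exited normally.  /  Program exited
                         with code 01.
     SIGNAL_RECEIVED:    Program received signal SIGINT, Interrupt.
     NO_HISTORY:         No more reverse-execution history.  */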
5065 \f
5066
5067 /* Here to return control to GDB when the inferior stops for real.
5068 Print appropriate messages, remove breakpoints, give terminal our modes.
5069
5070 STOP_PRINT_FRAME nonzero means print the executing frame
5071 (pc, function, args, file, line number and line text).
5072 BREAKPOINTS_FAILED nonzero means stop was due to error
5073 attempting to insert breakpoints. */
5074
5075 void
5076 normal_stop (void)
5077 {
5078 struct target_waitstatus last;
5079 ptid_t last_ptid;
5080 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5081
5082 get_last_target_status (&last_ptid, &last);
5083
5084 /* If an exception is thrown from this point on, make sure to
5085 propagate GDB's knowledge of the executing state to the
5086 frontend/user running state. A QUIT is an easy exception to see
5087 here, so do this before any filtered output. */
5088 if (!non_stop)
5089 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5090 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5091 && last.kind != TARGET_WAITKIND_EXITED)
5092 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5093
5094 /* In non-stop mode, we don't want GDB to switch threads behind the
5095 user's back, to avoid races where the user is typing a command to
5096 apply to thread x, but GDB switches to thread y before the user
5097 finishes entering the command. */
5098
5099 /* As with the notification of thread events, we want to delay
5100 notifying the user that we've switched thread context until
5101 the inferior actually stops.
5102
5103 There's no point in saying anything if the inferior has exited.
5104 Note that SIGNALLED here means "exited with a signal", not
5105 "received a signal". */
5106 if (!non_stop
5107 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5108 && target_has_execution
5109 && last.kind != TARGET_WAITKIND_SIGNALLED
5110 && last.kind != TARGET_WAITKIND_EXITED)
5111 {
5112 target_terminal_ours_for_output ();
5113 printf_filtered (_("[Switching to %s]\n"),
5114 target_pid_to_str (inferior_ptid));
5115 annotate_thread_changed ();
5116 previous_inferior_ptid = inferior_ptid;
5117 }
5118
5119 if (!breakpoints_always_inserted_mode () && target_has_execution)
5120 {
5121 if (remove_breakpoints ())
5122 {
5123 target_terminal_ours_for_output ();
5124 printf_filtered (_("\
5125 Cannot remove breakpoints because program is no longer writable.\n\
5126 Further execution is probably impossible.\n"));
5127 }
5128 }
5129
5130 /* If an auto-display called a function and that got a signal,
5131 delete that auto-display to avoid an infinite recursion. */
5132
5133 if (stopped_by_random_signal)
5134 disable_current_display ();
5135
5136 /* Don't print a message if in the middle of doing a "step n"
5137 operation for n > 1 */
5138 if (target_has_execution
5139 && last.kind != TARGET_WAITKIND_SIGNALLED
5140 && last.kind != TARGET_WAITKIND_EXITED
5141 && inferior_thread ()->step_multi
5142 && inferior_thread ()->stop_step)
5143 goto done;
5144
5145 target_terminal_ours ();
5146
5147 /* Set the current source location. This will also happen if we
5148 display the frame below, but the current SAL will be incorrect
5149 during a user hook-stop function. */
5150 if (has_stack_frames () && !stop_stack_dummy)
5151 set_current_sal_from_frame (get_current_frame (), 1);
5152
5153 /* Let the user/frontend see the threads as stopped. */
5154 do_cleanups (old_chain);
5155
5156 /* Look up the hook_stop and run it (CLI internally handles problem
5157 of stop_command's pre-hook not existing). */
5158 if (stop_command)
5159 catch_errors (hook_stop_stub, stop_command,
5160 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5161
5162 if (!has_stack_frames ())
5163 goto done;
5164
5165 if (last.kind == TARGET_WAITKIND_SIGNALLED
5166 || last.kind == TARGET_WAITKIND_EXITED)
5167 goto done;
5168
5169 /* Select innermost stack frame - i.e., current frame is frame 0,
5170 and current location is based on that.
5171 Don't do this on return from a stack dummy routine,
5172 or if the program has exited. */
5173
5174 if (!stop_stack_dummy)
5175 {
5176 select_frame (get_current_frame ());
5177
5178 /* Print current location without a level number, if
5179 we have changed functions or hit a breakpoint.
5180 Print source line if we have one.
5181 bpstat_print() contains the logic deciding in detail
5182 what to print, based on the event(s) that just occurred. */
5183
5184 /* If --batch-silent is enabled then there's no need to print the current
5185 source location, and trying to do so risks causing an error message
5186 about missing source files. */
5187 if (stop_print_frame && !batch_silent)
5188 {
5189 int bpstat_ret;
5190 int source_flag;
5191 int do_frame_printing = 1;
5192 struct thread_info *tp = inferior_thread ();
5193
5194 bpstat_ret = bpstat_print (tp->stop_bpstat);
5195 switch (bpstat_ret)
5196 {
5197 case PRINT_UNKNOWN:
5198 /* If we had hit a shared library event breakpoint,
5199 bpstat_print would print out this message. If we hit
5200 an OS-level shared library event, do the same
5201 thing. */
5202 if (last.kind == TARGET_WAITKIND_LOADED)
5203 {
5204 printf_filtered (_("Stopped due to shared library event\n"));
5205 source_flag = SRC_LINE; /* something bogus */
5206 do_frame_printing = 0;
5207 break;
5208 }
5209
5210 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or should)
5211 carry around the function and does (or should) use that when doing
5212 a frame comparison, the explicit function check below should be redundant. */
5213 if (tp->stop_step
5214 && frame_id_eq (tp->step_frame_id,
5215 get_frame_id (get_current_frame ()))
5216 && step_start_function == find_pc_function (stop_pc))
5217 source_flag = SRC_LINE; /* finished step, just print source line */
5218 else
5219 source_flag = SRC_AND_LOC; /* print location and source line */
5220 break;
5221 case PRINT_SRC_AND_LOC:
5222 source_flag = SRC_AND_LOC; /* print location and source line */
5223 break;
5224 case PRINT_SRC_ONLY:
5225 source_flag = SRC_LINE;
5226 break;
5227 case PRINT_NOTHING:
5228 source_flag = SRC_LINE; /* something bogus */
5229 do_frame_printing = 0;
5230 break;
5231 default:
5232 internal_error (__FILE__, __LINE__, _("Unknown value."));
5233 }
5234
5235 /* The behavior of this routine with respect to the source
5236 flag is:
5237 SRC_LINE: Print only source line
5238 LOCATION: Print only location
5239 SRC_AND_LOC: Print location and source line */
5240 if (do_frame_printing)
5241 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5242
5243 /* Display the auto-display expressions. */
5244 do_displays ();
5245 }
5246 }
5247
5248 /* Save the function value return registers, if we care.
5249 We might be about to restore their previous contents. */
5250 if (inferior_thread ()->proceed_to_finish)
5251 {
5252 /* This should not be necessary. */
5253 if (stop_registers)
5254 regcache_xfree (stop_registers);
5255
5256 /* NB: The copy goes through to the target picking up the value of
5257 all the registers. */
5258 stop_registers = regcache_dup (get_current_regcache ());
5259 }
5260
5261 if (stop_stack_dummy)
5262 {
5263 /* Pop the empty frame that contains the stack dummy.
5264 This also restores inferior state prior to the call
5265 (struct inferior_thread_state). */
5266 struct frame_info *frame = get_current_frame ();
5267 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5268 frame_pop (frame);
5269 /* frame_pop() calls reinit_frame_cache as the last thing it does
5270 which means there's currently no selected frame. We don't need
5271 to re-establish a selected frame if the dummy call returns normally,
5272 that will be done by restore_inferior_status. However, we do have
5273 to handle the case where the dummy call is returning after being
5274 stopped (e.g. the dummy call previously hit a breakpoint). We
5275 can't know which case we have so just always re-establish a
5276 selected frame here. */
5277 select_frame (get_current_frame ());
5278 }
5279
5280 done:
5281 annotate_stopped ();
5282
5283 /* Suppress the stop observer if we're in the middle of:
5284
5285 - a step n (n > 1), as there are still more steps to be done.
5286
5287 - a "finish" command, as the observer will be called in
5288 finish_command_continuation, so it can include the inferior
5289 function's return value.
5290
5291 - calling an inferior function, as we pretend the inferior didn't
5292 run at all. The return value of the call is handled by the
5293 expression evaluator, through call_function_by_hand. */
5294
5295 if (!target_has_execution
5296 || last.kind == TARGET_WAITKIND_SIGNALLED
5297 || last.kind == TARGET_WAITKIND_EXITED
5298 || (!inferior_thread ()->step_multi
5299 && !(inferior_thread ()->stop_bpstat
5300 && inferior_thread ()->proceed_to_finish)
5301 && !inferior_thread ()->in_infcall))
5302 {
5303 if (!ptid_equal (inferior_ptid, null_ptid))
5304 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5305 stop_print_frame);
5306 else
5307 observer_notify_normal_stop (NULL, stop_print_frame);
5308 }
5309
5310 if (target_has_execution)
5311 {
5312 if (last.kind != TARGET_WAITKIND_SIGNALLED
5313 && last.kind != TARGET_WAITKIND_EXITED)
5314 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5315 Delete any breakpoint that is to be deleted at the next stop. */
5316 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5317 }
5318
5319 /* Try to get rid of automatically added inferiors that are no
5320 longer needed. Keeping those around slows down things linearly.
5321 Note that this never removes the current inferior. */
5322 prune_inferiors ();
5323 }
5324
5325 static int
5326 hook_stop_stub (void *cmd)
5327 {
5328 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5329 return (0);
5330 }
5331 \f
5332 int
5333 signal_stop_state (int signo)
5334 {
5335 return signal_stop[signo];
5336 }
5337
5338 int
5339 signal_print_state (int signo)
5340 {
5341 return signal_print[signo];
5342 }
5343
5344 int
5345 signal_pass_state (int signo)
5346 {
5347 return signal_program[signo];
5348 }
5349
5350 int
5351 signal_stop_update (int signo, int state)
5352 {
5353 int ret = signal_stop[signo];
5354 signal_stop[signo] = state;
5355 return ret;
5356 }
5357
5358 int
5359 signal_print_update (int signo, int state)
5360 {
5361 int ret = signal_print[signo];
5362 signal_print[signo] = state;
5363 return ret;
5364 }
5365
5366 int
5367 signal_pass_update (int signo, int state)
5368 {
5369 int ret = signal_program[signo];
5370 signal_program[signo] = state;
5371 return ret;
5372 }
5373
5374 static void
5375 sig_print_header (void)
5376 {
5377 printf_filtered (_("\
5378 Signal Stop\tPrint\tPass to program\tDescription\n"));
5379 }
5380
5381 static void
5382 sig_print_info (enum target_signal oursig)
5383 {
5384 const char *name = target_signal_to_name (oursig);
5385 int name_padding = 13 - strlen (name);
5386
5387 if (name_padding <= 0)
5388 name_padding = 0;
5389
5390 printf_filtered ("%s", name);
5391 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5392 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5393 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5394 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5395 printf_filtered ("%s\n", target_signal_to_string (oursig));
5396 }
5397
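/* With the defaults established in _initialize_infrun, "info signals
   SIGINT" prints a row along these lines (column widths approximate):

     Signal        Stop	Print	Pass to program	Description
     SIGINT        Yes	Yes	No		Interrupt

   SIGINT is not passed by default because the debugger itself uses
   it.  */
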
5398 /* Specify how various signals in the inferior should be handled. */
5399
5400 static void
5401 handle_command (char *args, int from_tty)
5402 {
5403 char **argv;
5404 int digits, wordlen;
5405 int sigfirst, signum, siglast;
5406 enum target_signal oursig;
5407 int allsigs;
5408 int nsigs;
5409 unsigned char *sigs;
5410 struct cleanup *old_chain;
5411
5412 if (args == NULL)
5413 {
5414 error_no_arg (_("signal to handle"));
5415 }
5416
5417 /* Allocate and zero an array of flags for which signals to handle. */
5418
5419 nsigs = (int) TARGET_SIGNAL_LAST;
5420 sigs = (unsigned char *) alloca (nsigs);
5421 memset (sigs, 0, nsigs);
5422
5423 /* Break the command line up into args. */
5424
5425 argv = gdb_buildargv (args);
5426 old_chain = make_cleanup_freeargv (argv);
5427
5428 /* Walk through the args, looking for signal oursigs, signal names, and
5429 actions. Signal numbers and signal names may be interspersed with
5430 actions, with the actions being performed for all signals cumulatively
5431 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5432
5433 while (*argv != NULL)
5434 {
5435 wordlen = strlen (*argv);
5436 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5437 {;
5438 }
5439 allsigs = 0;
5440 sigfirst = siglast = -1;
5441
5442 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5443 {
5444 /* Apply action to all signals except those used by the
5445 debugger. Silently skip those. */
5446 allsigs = 1;
5447 sigfirst = 0;
5448 siglast = nsigs - 1;
5449 }
5450 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5451 {
5452 SET_SIGS (nsigs, sigs, signal_stop);
5453 SET_SIGS (nsigs, sigs, signal_print);
5454 }
5455 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5456 {
5457 UNSET_SIGS (nsigs, sigs, signal_program);
5458 }
5459 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5460 {
5461 SET_SIGS (nsigs, sigs, signal_print);
5462 }
5463 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5464 {
5465 SET_SIGS (nsigs, sigs, signal_program);
5466 }
5467 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5468 {
5469 UNSET_SIGS (nsigs, sigs, signal_stop);
5470 }
5471 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5472 {
5473 SET_SIGS (nsigs, sigs, signal_program);
5474 }
5475 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5476 {
5477 UNSET_SIGS (nsigs, sigs, signal_print);
5478 UNSET_SIGS (nsigs, sigs, signal_stop);
5479 }
5480 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5481 {
5482 UNSET_SIGS (nsigs, sigs, signal_program);
5483 }
5484 else if (digits > 0)
5485 {
5486 /* It is numeric. The numeric signal refers to our own
5487 internal signal numbering from target.h, not to host/target
5488 signal number. This is a feature; users really should be
5489 using symbolic names anyway, and the common ones like
5490 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5491
5492 sigfirst = siglast = (int)
5493 target_signal_from_command (atoi (*argv));
5494 if ((*argv)[digits] == '-')
5495 {
5496 siglast = (int)
5497 target_signal_from_command (atoi ((*argv) + digits + 1));
5498 }
5499 if (sigfirst > siglast)
5500 {
5501 /* Bet he didn't figure we'd think of this case... */
5502 signum = sigfirst;
5503 sigfirst = siglast;
5504 siglast = signum;
5505 }
5506 }
5507 else
5508 {
5509 oursig = target_signal_from_name (*argv);
5510 if (oursig != TARGET_SIGNAL_UNKNOWN)
5511 {
5512 sigfirst = siglast = (int) oursig;
5513 }
5514 else
5515 {
5516 /* Not a number and not a recognized flag word => complain. */
5517 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5518 }
5519 }
5520
5521 /* If any signal numbers or symbol names were found, set flags for
5522 which signals to apply actions to. */
5523
5524 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5525 {
5526 switch ((enum target_signal) signum)
5527 {
5528 case TARGET_SIGNAL_TRAP:
5529 case TARGET_SIGNAL_INT:
5530 if (!allsigs && !sigs[signum])
5531 {
5532 if (query (_("%s is used by the debugger.\n\
5533 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5534 {
5535 sigs[signum] = 1;
5536 }
5537 else
5538 {
5539 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5540 gdb_flush (gdb_stdout);
5541 }
5542 }
5543 break;
5544 case TARGET_SIGNAL_0:
5545 case TARGET_SIGNAL_DEFAULT:
5546 case TARGET_SIGNAL_UNKNOWN:
5547 /* Make sure that "all" doesn't print these. */
5548 break;
5549 default:
5550 sigs[signum] = 1;
5551 break;
5552 }
5553 }
5554
5555 argv++;
5556 }
5557
5558 for (signum = 0; signum < nsigs; signum++)
5559 if (sigs[signum])
5560 {
5561 target_notice_signals (inferior_ptid);
5562
5563 if (from_tty)
5564 {
5565 /* Show the results. */
5566 sig_print_header ();
5567 for (; signum < nsigs; signum++)
5568 if (sigs[signum])
5569 sig_print_info (signum);
5570 }
5571
5572 break;
5573 }
5574
5575 do_cleanups (old_chain);
5576 }
5577
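/* For reference, a few command lines the parser above accepts
   (illustrative, not exhaustive):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGINT stop print      <- queries, SIGINT is used by GDB
     (gdb) handle 14-15 nopass           <- range in GDB's internal numbering
     (gdb) handle all print

   Actions accumulate and apply to every signal listed on the line.  */
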
5578 static void
5579 xdb_handle_command (char *args, int from_tty)
5580 {
5581 char **argv;
5582 struct cleanup *old_chain;
5583
5584 if (args == NULL)
5585 error_no_arg (_("xdb command"));
5586
5587 /* Break the command line up into args. */
5588
5589 argv = gdb_buildargv (args);
5590 old_chain = make_cleanup_freeargv (argv);
5591 if (argv[1] != (char *) NULL)
5592 {
5593 char *argBuf;
5594 int bufLen;
5595
5596 bufLen = strlen (argv[0]) + 20;
5597 argBuf = (char *) xmalloc (bufLen);
5598 if (argBuf)
5599 {
5600 int validFlag = 1;
5601 enum target_signal oursig;
5602
5603 oursig = target_signal_from_name (argv[0]);
5604 memset (argBuf, 0, bufLen);
5605 if (strcmp (argv[1], "Q") == 0)
5606 sprintf (argBuf, "%s %s", argv[0], "noprint");
5607 else
5608 {
5609 if (strcmp (argv[1], "s") == 0)
5610 {
5611 if (!signal_stop[oursig])
5612 sprintf (argBuf, "%s %s", argv[0], "stop");
5613 else
5614 sprintf (argBuf, "%s %s", argv[0], "nostop");
5615 }
5616 else if (strcmp (argv[1], "i") == 0)
5617 {
5618 if (!signal_program[oursig])
5619 sprintf (argBuf, "%s %s", argv[0], "pass");
5620 else
5621 sprintf (argBuf, "%s %s", argv[0], "nopass");
5622 }
5623 else if (strcmp (argv[1], "r") == 0)
5624 {
5625 if (!signal_print[oursig])
5626 sprintf (argBuf, "%s %s", argv[0], "print");
5627 else
5628 sprintf (argBuf, "%s %s", argv[0], "noprint");
5629 }
5630 else
5631 validFlag = 0;
5632 }
5633 if (validFlag)
5634 handle_command (argBuf, from_tty);
5635 else
5636 printf_filtered (_("Invalid signal handling flag.\n"));
5637 if (argBuf)
5638 xfree (argBuf);
5639 }
5640 }
5641 do_cleanups (old_chain);
5642 }
5643
5644 /* Print current contents of the tables set by the handle command.
5645 It is possible we should just be printing signals actually used
5646 by the current target (but for things to work right when switching
5647 targets, all signals should be in the signal tables). */
5648
5649 static void
5650 signals_info (char *signum_exp, int from_tty)
5651 {
5652 enum target_signal oursig;
5653 sig_print_header ();
5654
5655 if (signum_exp)
5656 {
5657 /* First see if this is a symbol name. */
5658 oursig = target_signal_from_name (signum_exp);
5659 if (oursig == TARGET_SIGNAL_UNKNOWN)
5660 {
5661 /* No, try numeric. */
5662 oursig =
5663 target_signal_from_command (parse_and_eval_long (signum_exp));
5664 }
5665 sig_print_info (oursig);
5666 return;
5667 }
5668
5669 printf_filtered ("\n");
5670 /* These ugly casts brought to you by the native VAX compiler. */
5671 for (oursig = TARGET_SIGNAL_FIRST;
5672 (int) oursig < (int) TARGET_SIGNAL_LAST;
5673 oursig = (enum target_signal) ((int) oursig + 1))
5674 {
5675 QUIT;
5676
5677 if (oursig != TARGET_SIGNAL_UNKNOWN
5678 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5679 sig_print_info (oursig);
5680 }
5681
5682 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5683 }
5684
5685 /* The $_siginfo convenience variable is a bit special. We don't know
5686 for sure the type of the value until we actually have a chance to
5687 fetch the data. The type can change depending on gdbarch, so it is
5688 also dependent on which thread you have selected.
5689 We deal with this by:
5690 1. making $_siginfo be an internalvar that creates a new value on
5691 access.
5692
5693 2. making the value of $_siginfo be an lval_computed value. */
5694
5695 /* This function implements the lval_computed support for reading a
5696 $_siginfo value. */
5697
5698 static void
5699 siginfo_value_read (struct value *v)
5700 {
5701 LONGEST transferred;
5702
5703 transferred =
5704 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5705 NULL,
5706 value_contents_all_raw (v),
5707 value_offset (v),
5708 TYPE_LENGTH (value_type (v)));
5709
5710 if (transferred != TYPE_LENGTH (value_type (v)))
5711 error (_("Unable to read siginfo"));
5712 }
5713
5714 /* This function implements the lval_computed support for writing a
5715 $_siginfo value. */
5716
5717 static void
5718 siginfo_value_write (struct value *v, struct value *fromval)
5719 {
5720 LONGEST transferred;
5721
5722 transferred = target_write (&current_target,
5723 TARGET_OBJECT_SIGNAL_INFO,
5724 NULL,
5725 value_contents_all_raw (fromval),
5726 value_offset (v),
5727 TYPE_LENGTH (value_type (fromval)));
5728
5729 if (transferred != TYPE_LENGTH (value_type (fromval)))
5730 error (_("Unable to write siginfo"));
5731 }
5732
5733 static struct lval_funcs siginfo_value_funcs =
5734 {
5735 siginfo_value_read,
5736 siginfo_value_write
5737 };
5738
5739 /* Return a new value with the correct type for the siginfo object of
5740 the current thread using architecture GDBARCH. Return a void value
5741 if there's no object available. */
5742
5743 static struct value *
5744 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5745 {
5746 if (target_has_stack
5747 && !ptid_equal (inferior_ptid, null_ptid)
5748 && gdbarch_get_siginfo_type_p (gdbarch))
5749 {
5750 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5751 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5752 }
5753
5754 return allocate_value (builtin_type (gdbarch)->builtin_void);
5755 }
5756
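/* Illustrative use of $_siginfo as created above, assuming a gdbarch
   that provides a siginfo type (GNU/Linux, for example):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr
     (gdb) set $_siginfo.si_signo = 0

   The field names depend on the architecture's siginfo type; reads and
   writes go through siginfo_value_read / siginfo_value_write and thus
   through the TARGET_OBJECT_SIGNAL_INFO object.  */
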
5757 \f
5758 /* Inferior thread state.
5759 These are details related to the inferior itself, and don't include
5760 things like what frame the user had selected or what gdb was doing
5761 with the target at the time.
5762 For inferior function calls these are things we want to restore
5763 regardless of whether the function call successfully completes
5764 or the dummy frame has to be manually popped. */
5765
5766 struct inferior_thread_state
5767 {
5768 enum target_signal stop_signal;
5769 CORE_ADDR stop_pc;
5770 struct regcache *registers;
5771 };
5772
5773 struct inferior_thread_state *
5774 save_inferior_thread_state (void)
5775 {
5776 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5777 struct thread_info *tp = inferior_thread ();
5778
5779 inf_state->stop_signal = tp->stop_signal;
5780 inf_state->stop_pc = stop_pc;
5781
5782 inf_state->registers = regcache_dup (get_current_regcache ());
5783
5784 return inf_state;
5785 }
5786
5787 /* Restore inferior session state from INF_STATE. */
5788
5789 void
5790 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5791 {
5792 struct thread_info *tp = inferior_thread ();
5793
5794 tp->stop_signal = inf_state->stop_signal;
5795 stop_pc = inf_state->stop_pc;
5796
5797 /* The inferior can be gone if the user types "print exit(0)"
5798 (and perhaps other times). */
5799 if (target_has_execution)
5800 /* NB: The register write goes through to the target. */
5801 regcache_cpy (get_current_regcache (), inf_state->registers);
5802 regcache_xfree (inf_state->registers);
5803 xfree (inf_state);
5804 }
5805
5806 static void
5807 do_restore_inferior_thread_state_cleanup (void *state)
5808 {
5809 restore_inferior_thread_state (state);
5810 }
5811
5812 struct cleanup *
5813 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5814 {
5815 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5816 }
5817
5818 void
5819 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5820 {
5821 regcache_xfree (inf_state->registers);
5822 xfree (inf_state);
5823 }
5824
5825 struct regcache *
5826 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5827 {
5828 return inf_state->registers;
5829 }
5830
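#if 0 /* Documentation-only sketch (not compiled): the save/restore
         pattern the helpers above exist for; call_function_by_hand does
         something similar.  The wrapper function is hypothetical.  */
static void
example_inferior_call_wrapper (void)
{
  struct inferior_thread_state *saved = save_inferior_thread_state ();
  struct cleanup *back_to = make_cleanup_restore_inferior_thread_state (saved);

  /* ... push a dummy frame and run the inferior function here ...  */

  /* Normal completion: the cleanup puts registers, stop_pc and
     stop_signal back.  */
  do_cleanups (back_to);

  /* To keep the new state instead, a caller would discard_cleanups
     (back_to) and then discard_inferior_thread_state (saved).  */
}
#endif
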
5831 /* Session related state for inferior function calls.
5832 These are the additional bits of state that need to be restored
5833 when an inferior function call successfully completes. */
5834
5835 struct inferior_status
5836 {
5837 bpstat stop_bpstat;
5838 int stop_step;
5839 int stop_stack_dummy;
5840 int stopped_by_random_signal;
5841 int stepping_over_breakpoint;
5842 CORE_ADDR step_range_start;
5843 CORE_ADDR step_range_end;
5844 struct frame_id step_frame_id;
5845 struct frame_id step_stack_frame_id;
5846 enum step_over_calls_kind step_over_calls;
5847 CORE_ADDR step_resume_break_address;
5848 int stop_after_trap;
5849 int stop_soon;
5850
5851 /* ID of the selected frame when the inferior function call was made. */
5852 struct frame_id selected_frame_id;
5853
5854 int proceed_to_finish;
5855 int in_infcall;
5856 };
5857
5858 /* Save all of the information associated with the inferior<==>gdb
5859 connection. */
5860
5861 struct inferior_status *
5862 save_inferior_status (void)
5863 {
5864 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5865 struct thread_info *tp = inferior_thread ();
5866 struct inferior *inf = current_inferior ();
5867
5868 inf_status->stop_step = tp->stop_step;
5869 inf_status->stop_stack_dummy = stop_stack_dummy;
5870 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5871 inf_status->stepping_over_breakpoint = tp->trap_expected;
5872 inf_status->step_range_start = tp->step_range_start;
5873 inf_status->step_range_end = tp->step_range_end;
5874 inf_status->step_frame_id = tp->step_frame_id;
5875 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5876 inf_status->step_over_calls = tp->step_over_calls;
5877 inf_status->stop_after_trap = stop_after_trap;
5878 inf_status->stop_soon = inf->stop_soon;
5879 /* Save original bpstat chain here; replace it with copy of chain.
5880 If caller's caller is walking the chain, they'll be happier if we
5881 hand them back the original chain when restore_inferior_status is
5882 called. */
5883 inf_status->stop_bpstat = tp->stop_bpstat;
5884 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5885 inf_status->proceed_to_finish = tp->proceed_to_finish;
5886 inf_status->in_infcall = tp->in_infcall;
5887
5888 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5889
5890 return inf_status;
5891 }
5892
5893 static int
5894 restore_selected_frame (void *args)
5895 {
5896 struct frame_id *fid = (struct frame_id *) args;
5897 struct frame_info *frame;
5898
5899 frame = frame_find_by_id (*fid);
5900
5901 /* A NULL frame means the previously selected frame could not be
5902 found (e.g. the frame ID no longer identifies a live frame). */
5903 if (frame == NULL)
5904 {
5905 warning (_("Unable to restore previously selected frame."));
5906 return 0;
5907 }
5908
5909 select_frame (frame);
5910
5911 return (1);
5912 }
5913
5914 /* Restore inferior session state from INF_STATUS. */
5915
5916 void
5917 restore_inferior_status (struct inferior_status *inf_status)
5918 {
5919 struct thread_info *tp = inferior_thread ();
5920 struct inferior *inf = current_inferior ();
5921
5922 tp->stop_step = inf_status->stop_step;
5923 stop_stack_dummy = inf_status->stop_stack_dummy;
5924 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5925 tp->trap_expected = inf_status->stepping_over_breakpoint;
5926 tp->step_range_start = inf_status->step_range_start;
5927 tp->step_range_end = inf_status->step_range_end;
5928 tp->step_frame_id = inf_status->step_frame_id;
5929 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5930 tp->step_over_calls = inf_status->step_over_calls;
5931 stop_after_trap = inf_status->stop_after_trap;
5932 inf->stop_soon = inf_status->stop_soon;
5933 bpstat_clear (&tp->stop_bpstat);
5934 tp->stop_bpstat = inf_status->stop_bpstat;
5935 inf_status->stop_bpstat = NULL;
5936 tp->proceed_to_finish = inf_status->proceed_to_finish;
5937 tp->in_infcall = inf_status->in_infcall;
5938
5939 if (target_has_stack)
5940 {
5941 /* The point of catch_errors is that if the stack is clobbered,
5942 walking the stack might encounter a garbage pointer and
5943 error() trying to dereference it. */
5944 if (catch_errors
5945 (restore_selected_frame, &inf_status->selected_frame_id,
5946 "Unable to restore previously selected frame:\n",
5947 RETURN_MASK_ERROR) == 0)
5948 /* Error in restoring the selected frame. Select the innermost
5949 frame. */
5950 select_frame (get_current_frame ());
5951 }
5952
5953 xfree (inf_status);
5954 }
5955
5956 static void
5957 do_restore_inferior_status_cleanup (void *sts)
5958 {
5959 restore_inferior_status (sts);
5960 }
5961
5962 struct cleanup *
5963 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5964 {
5965 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5966 }
5967
5968 void
5969 discard_inferior_status (struct inferior_status *inf_status)
5970 {
5971 /* See save_inferior_status for info on stop_bpstat. */
5972 bpstat_clear (&inf_status->stop_bpstat);
5973 xfree (inf_status);
5974 }
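
#if 0 /* Documentation-only sketch (not compiled): how the session-level
         state above is saved around an inferior function call.  The
         wrapper function is hypothetical.  */
static void
example_infcall_session_wrapper (void)
{
  struct inferior_status *inf_status = save_inferior_status ();
  struct cleanup *back_to = make_cleanup_restore_inferior_status (inf_status);

  /* ... proceed () and wait until the dummy frame is popped ...  */

  /* Success: stepping state, the bpstat chain and the previously
     selected frame are put back.  */
  do_cleanups (back_to);

  /* A caller keeping the new state would discard_cleanups (back_to)
     and then discard_inferior_status (inf_status).  */
}
#endif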
5975 \f
5976 int
5977 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5978 {
5979 struct target_waitstatus last;
5980 ptid_t last_ptid;
5981
5982 get_last_target_status (&last_ptid, &last);
5983
5984 if (last.kind != TARGET_WAITKIND_FORKED)
5985 return 0;
5986
5987 if (!ptid_equal (last_ptid, pid))
5988 return 0;
5989
5990 *child_pid = last.value.related_pid;
5991 return 1;
5992 }
5993
5994 int
5995 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
5996 {
5997 struct target_waitstatus last;
5998 ptid_t last_ptid;
5999
6000 get_last_target_status (&last_ptid, &last);
6001
6002 if (last.kind != TARGET_WAITKIND_VFORKED)
6003 return 0;
6004
6005 if (!ptid_equal (last_ptid, pid))
6006 return 0;
6007
6008 *child_pid = last.value.related_pid;
6009 return 1;
6010 }
6011
6012 int
6013 inferior_has_execd (ptid_t pid, char **execd_pathname)
6014 {
6015 struct target_waitstatus last;
6016 ptid_t last_ptid;
6017
6018 get_last_target_status (&last_ptid, &last);
6019
6020 if (last.kind != TARGET_WAITKIND_EXECD)
6021 return 0;
6022
6023 if (!ptid_equal (last_ptid, pid))
6024 return 0;
6025
6026 *execd_pathname = xstrdup (last.value.execd_pathname);
6027 return 1;
6028 }
6029
6030 int
6031 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6032 {
6033 struct target_waitstatus last;
6034 ptid_t last_ptid;
6035
6036 get_last_target_status (&last_ptid, &last);
6037
6038 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6039 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6040 return 0;
6041
6042 if (!ptid_equal (last_ptid, pid))
6043 return 0;
6044
6045 *syscall_number = last.value.syscall_number;
6046 return 1;
6047 }
6048
6049 /* Oft used ptids */
6050 ptid_t null_ptid;
6051 ptid_t minus_one_ptid;
6052
6053 /* Create a ptid given the necessary PID, LWP, and TID components. */
6054
6055 ptid_t
6056 ptid_build (int pid, long lwp, long tid)
6057 {
6058 ptid_t ptid;
6059
6060 ptid.pid = pid;
6061 ptid.lwp = lwp;
6062 ptid.tid = tid;
6063 return ptid;
6064 }
6065
6066 /* Create a ptid from just a pid. */
6067
6068 ptid_t
6069 pid_to_ptid (int pid)
6070 {
6071 return ptid_build (pid, 0, 0);
6072 }
6073
6074 /* Fetch the pid (process id) component from a ptid. */
6075
6076 int
6077 ptid_get_pid (ptid_t ptid)
6078 {
6079 return ptid.pid;
6080 }
6081
6082 /* Fetch the lwp (lightweight process) component from a ptid. */
6083
6084 long
6085 ptid_get_lwp (ptid_t ptid)
6086 {
6087 return ptid.lwp;
6088 }
6089
6090 /* Fetch the tid (thread id) component from a ptid. */
6091
6092 long
6093 ptid_get_tid (ptid_t ptid)
6094 {
6095 return ptid.tid;
6096 }
6097
6098 /* ptid_equal() is used to test equality of two ptids. */
6099
6100 int
6101 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6102 {
6103 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6104 && ptid1.tid == ptid2.tid);
6105 }
6106
6107 /* Returns true if PTID represents a process. */
6108
6109 int
6110 ptid_is_pid (ptid_t ptid)
6111 {
6112 if (ptid_equal (minus_one_ptid, ptid))
6113 return 0;
6114 if (ptid_equal (null_ptid, ptid))
6115 return 0;
6116
6117 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6118 }
6119
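#if 0 /* Documentation-only sketch (not compiled) of the accessors
         above; the concrete pid/lwp values are made up.  */
  {
    ptid_t ptid = ptid_build (1234, 1234, 0);    /* pid 1234, lwp 1234 */

    gdb_assert (ptid_get_pid (ptid) == 1234);
    gdb_assert (!ptid_is_pid (ptid));            /* has an lwp component */
    gdb_assert (ptid_is_pid (pid_to_ptid (1234)));
    gdb_assert (!ptid_equal (ptid, null_ptid));
  }
#endif
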
6120 /* restore_inferior_ptid() will be used by the cleanup machinery
6121 to restore the inferior_ptid value saved in a call to
6122 save_inferior_ptid(). */
6123
6124 static void
6125 restore_inferior_ptid (void *arg)
6126 {
6127 ptid_t *saved_ptid_ptr = arg;
6128 inferior_ptid = *saved_ptid_ptr;
6129 xfree (arg);
6130 }
6131
6132 /* Save the value of inferior_ptid so that it may be restored by a
6133 later call to do_cleanups(). Returns the struct cleanup pointer
6134 needed for later doing the cleanup. */
6135
6136 struct cleanup *
6137 save_inferior_ptid (void)
6138 {
6139 ptid_t *saved_ptid_ptr;
6140
6141 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6142 *saved_ptid_ptr = inferior_ptid;
6143 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6144 }
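
#if 0 /* Documentation-only sketch (not compiled): temporarily switching
         inferior_ptid and restoring it through the cleanup above.
         OTHER_PTID stands for whatever thread the caller wants to
         operate on (hypothetical).  */
  {
    struct cleanup *old_chain = save_inferior_ptid ();

    inferior_ptid = other_ptid;      /* hypothetical thread to act on */
    /* ... per-thread work that consults inferior_ptid ...  */

    do_cleanups (old_chain);         /* inferior_ptid restored, ptid freed */
  }
#endif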
6145 \f
6146
6147 /* User interface for reverse debugging:
6148 Set exec-direction / show exec-direction commands
6149 (the direction only changes if the target supports reverse execution). */
6150
6151 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6152 static const char exec_forward[] = "forward";
6153 static const char exec_reverse[] = "reverse";
6154 static const char *exec_direction = exec_forward;
6155 static const char *exec_direction_names[] = {
6156 exec_forward,
6157 exec_reverse,
6158 NULL
6159 };
6160
6161 static void
6162 set_exec_direction_func (char *args, int from_tty,
6163 struct cmd_list_element *cmd)
6164 {
6165 if (target_can_execute_reverse)
6166 {
6167 if (!strcmp (exec_direction, exec_forward))
6168 execution_direction = EXEC_FORWARD;
6169 else if (!strcmp (exec_direction, exec_reverse))
6170 execution_direction = EXEC_REVERSE;
6171 }
6172 }
6173
6174 static void
6175 show_exec_direction_func (struct ui_file *out, int from_tty,
6176 struct cmd_list_element *cmd, const char *value)
6177 {
6178 switch (execution_direction) {
6179 case EXEC_FORWARD:
6180 fprintf_filtered (out, _("Forward.\n"));
6181 break;
6182 case EXEC_REVERSE:
6183 fprintf_filtered (out, _("Reverse.\n"));
6184 break;
6185 case EXEC_ERROR:
6186 default:
6187 fprintf_filtered (out,
6188 _("Forward (target `%s' does not support exec-direction).\n"),
6189 target_shortname);
6190 break;
6191 }
6192 }
6193
6194 /* User interface for non-stop mode. */
6195
6196 int non_stop = 0;
6197 static int non_stop_1 = 0;
6198
6199 static void
6200 set_non_stop (char *args, int from_tty,
6201 struct cmd_list_element *c)
6202 {
6203 if (target_has_execution)
6204 {
6205 non_stop_1 = non_stop;
6206 error (_("Cannot change this setting while the inferior is running."));
6207 }
6208
6209 non_stop = non_stop_1;
6210 }
6211
6212 static void
6213 show_non_stop (struct ui_file *file, int from_tty,
6214 struct cmd_list_element *c, const char *value)
6215 {
6216 fprintf_filtered (file,
6217 _("Controlling the inferior in non-stop mode is %s.\n"),
6218 value);
6219 }
6220
6221 static void
6222 show_schedule_multiple (struct ui_file *file, int from_tty,
6223 struct cmd_list_element *c, const char *value)
6224 {
6225 fprintf_filtered (file, _("\
6226 Resuming the execution of threads of all processes is %s.\n"), value);
6227 }
6228
6229 void
6230 _initialize_infrun (void)
6231 {
6232 int i;
6233 int numsigs;
6234 struct cmd_list_element *c;
6235
6236 add_info ("signals", signals_info, _("\
6237 What debugger does when program gets various signals.\n\
6238 Specify a signal as argument to print info on that signal only."));
6239 add_info_alias ("handle", "signals", 0);
6240
6241 add_com ("handle", class_run, handle_command, _("\
6242 Specify how to handle a signal.\n\
6243 Args are signals and actions to apply to those signals.\n\
6244 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6245 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6246 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6247 The special arg \"all\" is recognized to mean all signals except those\n\
6248 used by the debugger, typically SIGTRAP and SIGINT.\n\
6249 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6250 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6251 Stop means reenter debugger if this signal happens (implies print).\n\
6252 Print means print a message if this signal happens.\n\
6253 Pass means let program see this signal; otherwise program doesn't know.\n\
6254 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6255 Pass and Stop may be combined."));
6256 if (xdb_commands)
6257 {
6258 add_com ("lz", class_info, signals_info, _("\
6259 What debugger does when program gets various signals.\n\
6260 Specify a signal as argument to print info on that signal only."));
6261 add_com ("z", class_run, xdb_handle_command, _("\
6262 Specify how to handle a signal.\n\
6263 Args are signals and actions to apply to those signals.\n\
6264 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6265 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6266 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6267 The special arg \"all\" is recognized to mean all signals except those\n\
6268 used by the debugger, typically SIGTRAP and SIGINT.\n\
6269 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6270 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6271 nopass), \"Q\" (noprint)\n\
6272 Stop means reenter debugger if this signal happens (implies print).\n\
6273 Print means print a message if this signal happens.\n\
6274 Pass means let program see this signal; otherwise program doesn't know.\n\
6275 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6276 Pass and Stop may be combined."));
6277 }
6278
6279 if (!dbx_commands)
6280 stop_command = add_cmd ("stop", class_obscure,
6281 not_just_help_class_command, _("\
6282 There is no `stop' command, but you can set a hook on `stop'.\n\
6283 This allows you to set a list of commands to be run each time execution\n\
6284 of the program stops."), &cmdlist);
6285
6286 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6287 Set inferior debugging."), _("\
6288 Show inferior debugging."), _("\
6289 When non-zero, inferior specific debugging is enabled."),
6290 NULL,
6291 show_debug_infrun,
6292 &setdebuglist, &showdebuglist);
6293
6294 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6295 Set displaced stepping debugging."), _("\
6296 Show displaced stepping debugging."), _("\
6297 When non-zero, displaced stepping specific debugging is enabled."),
6298 NULL,
6299 show_debug_displaced,
6300 &setdebuglist, &showdebuglist);
6301
6302 add_setshow_boolean_cmd ("non-stop", no_class,
6303 &non_stop_1, _("\
6304 Set whether gdb controls the inferior in non-stop mode."), _("\
6305 Show whether gdb controls the inferior in non-stop mode."), _("\
6306 When debugging a multi-threaded program and this setting is\n\
6307 off (the default, also called all-stop mode), when one thread stops\n\
6308 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6309 all other threads in the program while you interact with the thread of\n\
6310 interest. When you continue or step a thread, you can allow the other\n\
6311 threads to run, or have them remain stopped, but while you inspect any\n\
6312 thread's state, all threads stop.\n\
6313 \n\
6314 In non-stop mode, when one thread stops, other threads can continue\n\
6315 to run freely. You'll be able to step each thread independently,\n\
6316 leave it stopped or free to run as needed."),
6317 set_non_stop,
6318 show_non_stop,
6319 &setlist,
6320 &showlist);
6321
6322 numsigs = (int) TARGET_SIGNAL_LAST;
6323 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6324 signal_print = (unsigned char *)
6325 xmalloc (sizeof (signal_print[0]) * numsigs);
6326 signal_program = (unsigned char *)
6327 xmalloc (sizeof (signal_program[0]) * numsigs);
6328 for (i = 0; i < numsigs; i++)
6329 {
6330 signal_stop[i] = 1;
6331 signal_print[i] = 1;
6332 signal_program[i] = 1;
6333 }
6334
6335 /* Signals caused by debugger's own actions
6336 should not be given to the program afterwards. */
6337 signal_program[TARGET_SIGNAL_TRAP] = 0;
6338 signal_program[TARGET_SIGNAL_INT] = 0;
6339
6340 /* Signals that are not errors should not normally enter the debugger. */
6341 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6342 signal_print[TARGET_SIGNAL_ALRM] = 0;
6343 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6344 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6345 signal_stop[TARGET_SIGNAL_PROF] = 0;
6346 signal_print[TARGET_SIGNAL_PROF] = 0;
6347 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6348 signal_print[TARGET_SIGNAL_CHLD] = 0;
6349 signal_stop[TARGET_SIGNAL_IO] = 0;
6350 signal_print[TARGET_SIGNAL_IO] = 0;
6351 signal_stop[TARGET_SIGNAL_POLL] = 0;
6352 signal_print[TARGET_SIGNAL_POLL] = 0;
6353 signal_stop[TARGET_SIGNAL_URG] = 0;
6354 signal_print[TARGET_SIGNAL_URG] = 0;
6355 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6356 signal_print[TARGET_SIGNAL_WINCH] = 0;
6357
6358 /* These signals are used internally by user-level thread
6359 implementations. (See signal(5) on Solaris.) Like the above
6360 signals, a healthy program receives and handles them as part of
6361 its normal operation. */
6362 signal_stop[TARGET_SIGNAL_LWP] = 0;
6363 signal_print[TARGET_SIGNAL_LWP] = 0;
6364 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6365 signal_print[TARGET_SIGNAL_WAITING] = 0;
6366 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6367 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6368
6369 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6370 &stop_on_solib_events, _("\
6371 Set stopping for shared library events."), _("\
6372 Show stopping for shared library events."), _("\
6373 If nonzero, gdb will give control to the user when the dynamic linker\n\
6374 notifies gdb of shared library events. The most common event of interest\n\
6375 to the user would be loading/unloading of a new library."),
6376 NULL,
6377 show_stop_on_solib_events,
6378 &setlist, &showlist);
6379
6380 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6381 follow_fork_mode_kind_names,
6382 &follow_fork_mode_string, _("\
6383 Set debugger response to a program call of fork or vfork."), _("\
6384 Show debugger response to a program call of fork or vfork."), _("\
6385 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6386 parent - the original process is debugged after a fork\n\
6387 child - the new process is debugged after a fork\n\
6388 The unfollowed process will continue to run.\n\
6389 By default, the debugger will follow the parent process."),
6390 NULL,
6391 show_follow_fork_mode_string,
6392 &setlist, &showlist);
6393
6394 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6395 follow_exec_mode_names,
6396 &follow_exec_mode_string, _("\
6397 Set debugger response to a program call of exec."), _("\
6398 Show debugger response to a program call of exec."), _("\
6399 An exec call replaces the program image of a process.\n\
6400 \n\
6401 follow-exec-mode can be:\n\
6402 \n\
6403 new - the debugger creates a new inferior and rebinds the process \n\
6404 to this new inferior. The program the process was running before\n\
6405 the exec call can be restarted afterwards by restarting the original\n\
6406 inferior.\n\
6407 \n\
6408 same - the debugger keeps the process bound to the same inferior.\n\
6409 The new executable image replaces the previous executable loaded in\n\
6410 the inferior. Restarting the inferior after the exec call restarts\n\
6411 the executable the process was running after the exec call.\n\
6412 \n\
6413 By default, the debugger will use the same inferior."),
6414 NULL,
6415 show_follow_exec_mode_string,
6416 &setlist, &showlist);
6417
6418 add_setshow_enum_cmd ("scheduler-locking", class_run,
6419 scheduler_enums, &scheduler_mode, _("\
6420 Set mode for locking scheduler during execution."), _("\
6421 Show mode for locking scheduler during execution."), _("\
6422 off == no locking (threads may preempt at any time)\n\
6423 on == full locking (no thread except the current thread may run)\n\
6424 step == scheduler locked during every single-step operation.\n\
6425 In this mode, no other thread may run during a step command.\n\
6426 Other threads may run while stepping over a function call ('next')."),
6427 set_schedlock_func, /* traps on target vector */
6428 show_scheduler_mode,
6429 &setlist, &showlist);
6430
6431 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6432 Set mode for resuming threads of all processes."), _("\
6433 Show mode for resuming threads of all processes."), _("\
6434 When on, execution commands (such as 'continue' or 'next') resume all\n\
6435 threads of all processes. When off (which is the default), execution\n\
6436 commands only resume the threads of the current process. The set of\n\
6437 threads that are resumed is further refined by the scheduler-locking\n\
6438 mode (see help set scheduler-locking)."),
6439 NULL,
6440 show_schedule_multiple,
6441 &setlist, &showlist);
6442
6443 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6444 Set mode of the step operation."), _("\
6445 Show mode of the step operation."), _("\
6446 When set, doing a step over a function without debug line information\n\
6447 will stop at the first instruction of that function. Otherwise, the\n\
6448 function is skipped and the step command stops at a different source line."),
6449 NULL,
6450 show_step_stop_if_no_debug,
6451 &setlist, &showlist);
6452
6453 add_setshow_enum_cmd ("displaced-stepping", class_run,
6454 can_use_displaced_stepping_enum,
6455 &can_use_displaced_stepping, _("\
6456 Set debugger's willingness to use displaced stepping."), _("\
6457 Show debugger's willingness to use displaced stepping."), _("\
6458 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6459 supported by the target architecture. If off, gdb will not use displaced\n\
6460 stepping to step over breakpoints, even if such is supported by the target\n\
6461 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6462 if the target architecture supports it and non-stop mode is active, but will not\n\
6463 use it in all-stop mode (see help set non-stop)."),
6464 NULL,
6465 show_can_use_displaced_stepping,
6466 &setlist, &showlist);
6467
6468 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6469 &exec_direction, _("Set direction of execution.\n\
6470 Options are 'forward' or 'reverse'."),
6471 _("Show direction of execution (forward/reverse)."),
6472 _("Tells gdb whether to execute forward or backward."),
6473 set_exec_direction_func, show_exec_direction_func,
6474 &setlist, &showlist);
6475
6476 /* Set/show detach-on-fork: user-settable mode. */
6477
6478 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6479 Set whether gdb will detach the child of a fork."), _("\
6480 Show whether gdb will detach the child of a fork."), _("\
6481 Tells gdb whether to detach the child of a fork."),
6482 NULL, NULL, &setlist, &showlist);
6483
6484 /* ptid initializations */
6485 null_ptid = ptid_build (0, 0, 0);
6486 minus_one_ptid = ptid_build (-1, 0, 0);
6487 inferior_ptid = null_ptid;
6488 target_last_wait_ptid = minus_one_ptid;
6489 displaced_step_ptid = null_ptid;
6490
6491 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6492 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6493 observer_attach_thread_exit (infrun_thread_thread_exit);
6494
6495 /* Explicitly create without lookup, since that tries to create a
6496 value with a void typed value, and when we get here, gdbarch
6497 isn't initialized yet. At this point, we're quite sure there
6498 isn't another convenience variable of the same name. */
6499 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6500 }