1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such a function. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
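/* Illustrative sketch only (not the code path used later in this
   file): assuming PC and GDBARCH describe the thread being stepped,
   the two hooks discussed above would typically be consulted along
   these lines:

     if (in_solib_dynsym_resolve_code (pc))
       {
         CORE_ADDR resolver_return
           = gdbarch_skip_solib_resolver (gdbarch, pc);

         if (resolver_return != 0)
           -- plant a step-resume breakpoint at RESOLVER_RETURN and resume
         else
           -- keep single-stepping until control leaves the dynamic linker
       }
 */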
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
180
181
182 /* Convert the #defines into values. This is temporary until wfi control
183 flow is completely sorted out. */
184
185 #ifndef CANNOT_STEP_HW_WATCHPOINTS
186 #define CANNOT_STEP_HW_WATCHPOINTS 0
187 #else
188 #undef CANNOT_STEP_HW_WATCHPOINTS
189 #define CANNOT_STEP_HW_WATCHPOINTS 1
190 #endif
191
192 /* Tables of how to react to signals; the user sets them. */
193
194 static unsigned char *signal_stop;
195 static unsigned char *signal_print;
196 static unsigned char *signal_program;
197
198 #define SET_SIGS(nsigs,sigs,flags) \
199 do { \
200 int signum = (nsigs); \
201 while (signum-- > 0) \
202 if ((sigs)[signum]) \
203 (flags)[signum] = 1; \
204 } while (0)
205
206 #define UNSET_SIGS(nsigs,sigs,flags) \
207 do { \
208 int signum = (nsigs); \
209 while (signum-- > 0) \
210 if ((sigs)[signum]) \
211 (flags)[signum] = 0; \
212 } while (0)
213
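/* Illustrative sketch only: this is roughly how the "handle" command,
   defined further down in this file, applies the macros above to the
   signal tables above.  NSIGS and SIGS stand for a signal count and a
   per-signal flag array built from the user's arguments:

     SET_SIGS (nsigs, sigs, signal_stop);       <- "handle SIG stop"
     SET_SIGS (nsigs, sigs, signal_print);      <- stopping implies printing
     UNSET_SIGS (nsigs, sigs, signal_program);  <- "handle SIG nopass"  */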
214 /* Value to pass to target_resume() to cause all threads to resume */
215
216 #define RESUME_ALL minus_one_ptid
217
218 /* Command list pointer for the "stop" placeholder. */
219
220 static struct cmd_list_element *stop_command;
221
222 /* Function inferior was in as of last step command. */
223
224 static struct symbol *step_start_function;
225
226 /* Nonzero if we want to give control to the user when we're notified
227 of shared library events by the dynamic linker. */
228 static int stop_on_solib_events;
229 static void
230 show_stop_on_solib_events (struct ui_file *file, int from_tty,
231 struct cmd_list_element *c, const char *value)
232 {
233 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
234 value);
235 }
236
237 /* Nonzero means we are expecting a trace trap
238 and should stop the inferior and return silently when it happens. */
239
240 int stop_after_trap;
241
242 /* Save register contents here when executing a "finish" command or when we
243 are about to pop a stack dummy frame, if and only if proceed_to_finish is set.
244 Thus this contains the return value from the called function (assuming
245 values are returned in a register). */
246
247 struct regcache *stop_registers;
248
249 /* Nonzero after stop if current stack frame should be printed. */
250
251 static int stop_print_frame;
252
253 /* This is a cached copy of the pid/waitstatus of the last event
254 returned by target_wait()/deprecated_target_wait_hook(). This
255 information is returned by get_last_target_status(). */
256 static ptid_t target_last_wait_ptid;
257 static struct target_waitstatus target_last_waitstatus;
258
259 static void context_switch (ptid_t ptid);
260
261 void init_thread_stepping_state (struct thread_info *tss);
262
263 void init_infwait_state (void);
264
265 static const char follow_fork_mode_child[] = "child";
266 static const char follow_fork_mode_parent[] = "parent";
267
268 static const char *follow_fork_mode_kind_names[] = {
269 follow_fork_mode_child,
270 follow_fork_mode_parent,
271 NULL
272 };
273
274 static const char *follow_fork_mode_string = follow_fork_mode_parent;
275 static void
276 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
277 struct cmd_list_element *c, const char *value)
278 {
279 fprintf_filtered (file, _("\
280 Debugger response to a program call of fork or vfork is \"%s\".\n"),
281 value);
282 }
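/* For reference (illustrative note, not part of the original logic):
   FOLLOW_FORK_MODE_STRING above is driven by the user-level
   "set follow-fork-mode parent|child" command, registered in
   _initialize_infrun at the end of this file, e.g.:

     (gdb) set follow-fork-mode child
     (gdb) show follow-fork-mode

   See also "set detach-on-fork", which controls DETACH_FORK above.  */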
283 \f
284
285 /* Tell the target to follow the fork we're stopped at. Returns true
286 if the inferior should be resumed; false, if the target for some
287 reason decided it's best not to resume. */
288
289 static int
290 follow_fork (void)
291 {
292 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
293 int should_resume = 1;
294 struct thread_info *tp;
295
296 /* Copy user stepping state to the new inferior thread. FIXME: the
297 followed fork child thread should have a copy of most of the
298 parent thread structure's run control related fields, not just these.
299 Initialized to avoid "may be used uninitialized" warnings from gcc. */
300 struct breakpoint *step_resume_breakpoint = NULL;
301 CORE_ADDR step_range_start = 0;
302 CORE_ADDR step_range_end = 0;
303 struct frame_id step_frame_id = { 0 };
304
305 if (!non_stop)
306 {
307 ptid_t wait_ptid;
308 struct target_waitstatus wait_status;
309
310 /* Get the last target status returned by target_wait(). */
311 get_last_target_status (&wait_ptid, &wait_status);
312
313 /* If not stopped at a fork event, then there's nothing else to
314 do. */
315 if (wait_status.kind != TARGET_WAITKIND_FORKED
316 && wait_status.kind != TARGET_WAITKIND_VFORKED)
317 return 1;
318
319 /* Check if we switched over from WAIT_PTID, since the event was
320 reported. */
321 if (!ptid_equal (wait_ptid, minus_one_ptid)
322 && !ptid_equal (inferior_ptid, wait_ptid))
323 {
324 /* We did. Switch back to WAIT_PTID thread, to tell the
325 target to follow it (in either direction). We'll
326 afterwards refuse to resume, and inform the user what
327 happened. */
328 switch_to_thread (wait_ptid);
329 should_resume = 0;
330 }
331 }
332
333 tp = inferior_thread ();
334
335 /* If there were any forks/vforks that were caught and are now to be
336 followed, then do so now. */
337 switch (tp->pending_follow.kind)
338 {
339 case TARGET_WAITKIND_FORKED:
340 case TARGET_WAITKIND_VFORKED:
341 {
342 ptid_t parent, child;
343
344 /* If the user did a next/step, etc, over a fork call,
345 preserve the stepping state in the fork child. */
346 if (follow_child && should_resume)
347 {
348 step_resume_breakpoint
349 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
350 step_range_start = tp->step_range_start;
351 step_range_end = tp->step_range_end;
352 step_frame_id = tp->step_frame_id;
353
354 /* For now, delete the parent's sr breakpoint, otherwise,
355 parent/child sr breakpoints are considered duplicates,
356 and the child version will not be installed. Remove
357 this when the breakpoints module becomes aware of
358 inferiors and address spaces. */
359 delete_step_resume_breakpoint (tp);
360 tp->step_range_start = 0;
361 tp->step_range_end = 0;
362 tp->step_frame_id = null_frame_id;
363 }
364
365 parent = inferior_ptid;
366 child = tp->pending_follow.value.related_pid;
367
368 /* Tell the target to do whatever is necessary to follow
369 either parent or child. */
370 if (target_follow_fork (follow_child))
371 {
372 /* Target refused to follow, or there's some other reason
373 we shouldn't resume. */
374 should_resume = 0;
375 }
376 else
377 {
378 /* This pending follow fork event is now handled, one way
379 or another. The previous selected thread may be gone
380 from the lists by now, but if it is still around, need
381 to clear the pending follow request. */
382 tp = find_thread_ptid (parent);
383 if (tp)
384 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
385
386 /* This makes sure we don't try to apply the "Switched
387 over from WAIT_PTID" logic above. */
388 nullify_last_target_wait_ptid ();
389
390 /* If we followed the child, switch to it... */
391 if (follow_child)
392 {
393 switch_to_thread (child);
394
395 /* ... and preserve the stepping state, in case the
396 user was stepping over the fork call. */
397 if (should_resume)
398 {
399 tp = inferior_thread ();
400 tp->step_resume_breakpoint = step_resume_breakpoint;
401 tp->step_range_start = step_range_start;
402 tp->step_range_end = step_range_end;
403 tp->step_frame_id = step_frame_id;
404 }
405 else
406 {
407 /* If we get here, it was because we're trying to
408 resume from a fork catchpoint, but, the user
409 has switched threads away from the thread that
410 forked. In that case, the resume command
411 issued is most likely not applicable to the
412 child, so just warn, and refuse to resume. */
413 warning (_("\
414 Not resuming: switched threads before following fork child.\n"));
415 }
416
417 /* Reset breakpoints in the child as appropriate. */
418 follow_inferior_reset_breakpoints ();
419 }
420 else
421 switch_to_thread (parent);
422 }
423 }
424 break;
425 case TARGET_WAITKIND_SPURIOUS:
426 /* Nothing to follow. */
427 break;
428 default:
429 internal_error (__FILE__, __LINE__,
430 "Unexpected pending_follow.kind %d\n",
431 tp->pending_follow.kind);
432 break;
433 }
434
435 return should_resume;
436 }
437
438 void
439 follow_inferior_reset_breakpoints (void)
440 {
441 struct thread_info *tp = inferior_thread ();
442
443 /* Was there a step_resume breakpoint? (There was if the user
444 did a "next" at the fork() call.) If so, explicitly reset its
445 thread number.
446
447 step_resumes are a form of bp that are made to be per-thread.
448 Since we created the step_resume bp when the parent process
449 was being debugged, and now are switching to the child process,
450 from the breakpoint package's viewpoint, that's a switch of
451 "threads". We must update the bp's notion of which thread
452 it is for, or it'll be ignored when it triggers. */
453
454 if (tp->step_resume_breakpoint)
455 breakpoint_re_set_thread (tp->step_resume_breakpoint);
456
457 /* Reinsert all breakpoints in the child. The user may have set
458 breakpoints after catching the fork, in which case those
459 were never set in the child, but only in the parent. This makes
460 sure the inserted breakpoints match the breakpoint list. */
461
462 breakpoint_re_set ();
463 insert_breakpoints ();
464 }
465
466 /* The child has exited or execed: resume threads of the parent the
467 user wanted to be executing. */
468
469 static int
470 proceed_after_vfork_done (struct thread_info *thread,
471 void *arg)
472 {
473 int pid = * (int *) arg;
474
475 if (ptid_get_pid (thread->ptid) == pid
476 && is_running (thread->ptid)
477 && !is_executing (thread->ptid)
478 && !thread->stop_requested
479 && thread->stop_signal == TARGET_SIGNAL_0)
480 {
481 if (debug_infrun)
482 fprintf_unfiltered (gdb_stdlog,
483 "infrun: resuming vfork parent thread %s\n",
484 target_pid_to_str (thread->ptid));
485
486 switch_to_thread (thread->ptid);
487 clear_proceed_status ();
488 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
489 }
490
491 return 0;
492 }
493
494 /* Called whenever we notice an exec or exit event, to handle
495 detaching or resuming a vfork parent. */
496
497 static void
498 handle_vfork_child_exec_or_exit (int exec)
499 {
500 struct inferior *inf = current_inferior ();
501
502 if (inf->vfork_parent)
503 {
504 int resume_parent = -1;
505
506 /* This exec or exit marks the end of the shared memory region
507 between the parent and the child. If the user wanted to
508 detach from the parent, now is the time. */
509
510 if (inf->vfork_parent->pending_detach)
511 {
512 struct thread_info *tp;
513 struct cleanup *old_chain;
514 struct program_space *pspace;
515 struct address_space *aspace;
516
517 /* follow-fork child, detach-on-fork on */
518
519 old_chain = make_cleanup_restore_current_thread ();
520
521 /* We're letting go of the parent. */
522 tp = any_live_thread_of_process (inf->vfork_parent->pid);
523 switch_to_thread (tp->ptid);
524
525 /* We're about to detach from the parent, which implicitly
526 removes breakpoints from its address space. There's a
527 catch here: we want to reuse the spaces for the child,
528 but, parent/child are still sharing the pspace at this
529 point, although the exec in reality makes the kernel give
530 the child a fresh set of new pages. The problem here is
531 that the breakpoints module, being unaware of this, would
532 likely choose the child process to write to the parent
533 address space. Swapping the child temporarily away from
534 the spaces has the desired effect. Yes, this is "sort
535 of" a hack. */
536
537 pspace = inf->pspace;
538 aspace = inf->aspace;
539 inf->aspace = NULL;
540 inf->pspace = NULL;
541
542 if (debug_infrun || info_verbose)
543 {
544 target_terminal_ours ();
545
546 if (exec)
547 fprintf_filtered (gdb_stdlog,
548 "Detaching vfork parent process %d after child exec.\n",
549 inf->vfork_parent->pid);
550 else
551 fprintf_filtered (gdb_stdlog,
552 "Detaching vfork parent process %d after child exit.\n",
553 inf->vfork_parent->pid);
554 }
555
556 target_detach (NULL, 0);
557
558 /* Put it back. */
559 inf->pspace = pspace;
560 inf->aspace = aspace;
561
562 do_cleanups (old_chain);
563 }
564 else if (exec)
565 {
566 /* We're staying attached to the parent, so, really give the
567 child a new address space. */
568 inf->pspace = add_program_space (maybe_new_address_space ());
569 inf->aspace = inf->pspace->aspace;
570 inf->removable = 1;
571 set_current_program_space (inf->pspace);
572
573 resume_parent = inf->vfork_parent->pid;
574
575 /* Break the bonds. */
576 inf->vfork_parent->vfork_child = NULL;
577 }
578 else
579 {
580 struct cleanup *old_chain;
581 struct program_space *pspace;
582
583 /* If this is a vfork child exiting, then the pspace and
584 aspaces were shared with the parent. Since we're
585 reporting the process exit, we'll be mourning all that is
586 found in the address space, and switching to null_ptid,
587 preparing to start a new inferior. But, since we don't
588 want to clobber the parent's address/program spaces, we
589 go ahead and create a new one for this exiting
590 inferior. */
591
592 /* Switch to null_ptid, so that clone_program_space doesn't want
593 to read the selected frame of a dead process. */
594 old_chain = save_inferior_ptid ();
595 inferior_ptid = null_ptid;
596
597 /* This inferior is dead, so avoid giving the breakpoints
598 module the option to write through to it (cloning a
599 program space resets breakpoints). */
600 inf->aspace = NULL;
601 inf->pspace = NULL;
602 pspace = add_program_space (maybe_new_address_space ());
603 set_current_program_space (pspace);
604 inf->removable = 1;
605 clone_program_space (pspace, inf->vfork_parent->pspace);
606 inf->pspace = pspace;
607 inf->aspace = pspace->aspace;
608
609 /* Put back inferior_ptid. We'll continue mourning this
610 inferior. */
611 do_cleanups (old_chain);
612
613 resume_parent = inf->vfork_parent->pid;
614 /* Break the bonds. */
615 inf->vfork_parent->vfork_child = NULL;
616 }
617
618 inf->vfork_parent = NULL;
619
620 gdb_assert (current_program_space == inf->pspace);
621
622 if (non_stop && resume_parent != -1)
623 {
624 /* If the user wanted the parent to be running, let it go
625 free now. */
626 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
627
628 if (debug_infrun)
629 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
630 resume_parent);
631
632 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
633
634 do_cleanups (old_chain);
635 }
636 }
637 }
638
639 /* Enum strings for "set|show follow-exec-mode". */
640
641 static const char follow_exec_mode_new[] = "new";
642 static const char follow_exec_mode_same[] = "same";
643 static const char *follow_exec_mode_names[] =
644 {
645 follow_exec_mode_new,
646 follow_exec_mode_same,
647 NULL,
648 };
649
650 static const char *follow_exec_mode_string = follow_exec_mode_same;
651 static void
652 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
653 struct cmd_list_element *c, const char *value)
654 {
655 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
656 }
657
658 /* EXECD_PATHNAME is assumed to be non-NULL. */
659
660 static void
661 follow_exec (ptid_t pid, char *execd_pathname)
662 {
663 struct target_ops *tgt;
664 struct thread_info *th = inferior_thread ();
665 struct inferior *inf = current_inferior ();
666
667 /* This is an exec event that we actually wish to pay attention to.
668 Refresh our symbol table to the newly exec'd program, remove any
669 momentary bp's, etc.
670
671 If there are breakpoints, they aren't really inserted now,
672 since the exec() transformed our inferior into a fresh set
673 of instructions.
674
675 We want to preserve symbolic breakpoints on the list, since
676 we have hopes that they can be reset after the new a.out's
677 symbol table is read.
678
679 However, any "raw" breakpoints must be removed from the list
680 (e.g., the solib bp's), since their address is probably invalid
681 now.
682
683 And, we DON'T want to call delete_breakpoints() here, since
684 that may write the bp's "shadow contents" (the instruction
685 value that was overwritten with a TRAP instruction). Since
686 we now have a new a.out, those shadow contents aren't valid. */
687
688 mark_breakpoints_out ();
689
690 update_breakpoints_after_exec ();
691
692 /* If there was one, it's gone now. We cannot truly step-to-next
693 statement through an exec(). */
694 th->step_resume_breakpoint = NULL;
695 th->step_range_start = 0;
696 th->step_range_end = 0;
697
698 /* The target reports the exec event to the main thread, even if
699 some other thread does the exec, and even if the main thread was
700 already stopped --- if debugging in non-stop mode, it's possible
701 the user had the main thread held stopped in the previous image
702 --- release it now. This is the same behavior as step-over-exec
703 with scheduler-locking on in all-stop mode. */
704 th->stop_requested = 0;
705
706 /* What is this a.out's name? */
707 printf_unfiltered (_("%s is executing new program: %s\n"),
708 target_pid_to_str (inferior_ptid),
709 execd_pathname);
710
711 /* We've followed the inferior through an exec. Therefore, the
712 inferior has essentially been killed & reborn. */
713
714 gdb_flush (gdb_stdout);
715
716 breakpoint_init_inferior (inf_execd);
717
718 if (gdb_sysroot && *gdb_sysroot)
719 {
720 char *name = alloca (strlen (gdb_sysroot)
721 + strlen (execd_pathname)
722 + 1);
723 strcpy (name, gdb_sysroot);
724 strcat (name, execd_pathname);
725 execd_pathname = name;
726 }
727
728 /* Reset the shared library package. This ensures that we get a
729 shlib event when the child reaches "_start", at which point the
730 dld will have had a chance to initialize the child. */
731 /* Also, loading a symbol file below may trigger symbol lookups, and
732 we don't want those to be satisfied by the libraries of the
733 previous incarnation of this process. */
734 no_shared_libraries (NULL, 0);
735
736 if (follow_exec_mode_string == follow_exec_mode_new)
737 {
738 struct program_space *pspace;
739 struct inferior *new_inf;
740
741 /* The user wants to keep the old inferior and program spaces
742 around. Create a new fresh one, and switch to it. */
743
744 inf = add_inferior (current_inferior ()->pid);
745 pspace = add_program_space (maybe_new_address_space ());
746 inf->pspace = pspace;
747 inf->aspace = pspace->aspace;
748
749 exit_inferior_num_silent (current_inferior ()->num);
750
751 set_current_inferior (inf);
752 set_current_program_space (pspace);
753 }
754
755 gdb_assert (current_program_space == inf->pspace);
756
757 /* That a.out is now the one to use. */
758 exec_file_attach (execd_pathname, 0);
759
760 /* Load the main file's symbols. */
761 symbol_file_add_main (execd_pathname, 0);
762
763 #ifdef SOLIB_CREATE_INFERIOR_HOOK
764 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
765 #else
766 solib_create_inferior_hook (0);
767 #endif
768
769 jit_inferior_created_hook ();
770
771 /* Reinsert all breakpoints. (Those which were symbolic have
772 been reset to the proper address in the new a.out, thanks
773 to symbol_file_command...) */
774 insert_breakpoints ();
775
776 /* The next resume of this inferior should bring it to the shlib
777 startup breakpoints. (If the user had also set bp's on
778 "main" from the old (parent) process, then they'll auto-
779 matically get reset there in the new process.) */
780 }
781
782 /* Non-zero if we are just simulating a single-step. This is needed
783 because we cannot remove the breakpoints in the inferior process
784 until after the `wait' in `wait_for_inferior'. */
785 static int singlestep_breakpoints_inserted_p = 0;
786
787 /* The thread we inserted single-step breakpoints for. */
788 static ptid_t singlestep_ptid;
789
790 /* PC when we started this single-step. */
791 static CORE_ADDR singlestep_pc;
792
793 /* If another thread hit the singlestep breakpoint, we save the original
794 thread here so that we can resume single-stepping it later. */
795 static ptid_t saved_singlestep_ptid;
796 static int stepping_past_singlestep_breakpoint;
797
798 /* If not equal to null_ptid, this means that after the step over a breakpoint
799 is finished, we need to switch to deferred_step_ptid, and step it.
800
801 The use case is when one thread has hit a breakpoint, and then the user
802 has switched to another thread and issued 'step'. We need to step over
803 the breakpoint in the thread which hit the breakpoint, but then continue
804 stepping the thread the user has selected. */
805 static ptid_t deferred_step_ptid;
806 \f
807 /* Displaced stepping. */
808
809 /* In non-stop debugging mode, we must take special care to manage
810 breakpoints properly; in particular, the traditional strategy for
811 stepping a thread past a breakpoint it has hit is unsuitable.
812 'Displaced stepping' is a tactic for stepping one thread past a
813 breakpoint it has hit while ensuring that other threads running
814 concurrently will hit the breakpoint as they should.
815
816 The traditional way to step a thread T off a breakpoint in a
817 multi-threaded program in all-stop mode is as follows:
818
819 a0) Initially, all threads are stopped, and breakpoints are not
820 inserted.
821 a1) We single-step T, leaving breakpoints uninserted.
822 a2) We insert breakpoints, and resume all threads.
823
824 In non-stop debugging, however, this strategy is unsuitable: we
825 don't want to have to stop all threads in the system in order to
826 continue or step T past a breakpoint. Instead, we use displaced
827 stepping:
828
829 n0) Initially, T is stopped, other threads are running, and
830 breakpoints are inserted.
831 n1) We copy the instruction "under" the breakpoint to a separate
832 location, outside the main code stream, making any adjustments
833 to the instruction, register, and memory state as directed by
834 T's architecture.
835 n2) We single-step T over the instruction at its new location.
836 n3) We adjust the resulting register and memory state as directed
837 by T's architecture. This includes resetting T's PC to point
838 back into the main instruction stream.
839 n4) We resume T.
840
841 This approach depends on the following gdbarch methods:
842
843 - gdbarch_max_insn_length and gdbarch_displaced_step_location
844 indicate where to copy the instruction, and how much space must
845 be reserved there. We use these in step n1.
846
847 - gdbarch_displaced_step_copy_insn copies an instruction to a new
848 address, and makes any necessary adjustments to the instruction,
849 register contents, and memory. We use this in step n1.
850
851 - gdbarch_displaced_step_fixup adjusts registers and memory after
852 we have successfully single-stepped the instruction, to yield the
853 same effect the instruction would have had if we had executed it
854 at its original address. We use this in step n3.
855
856 - gdbarch_displaced_step_free_closure provides cleanup.
857
858 The gdbarch_displaced_step_copy_insn and
859 gdbarch_displaced_step_fixup functions must be written so that
860 copying an instruction with gdbarch_displaced_step_copy_insn,
861 single-stepping across the copied instruction, and then applying
862 gdbarch_displaced_step_fixup should have the same effects on the
863 thread's memory and registers as stepping the instruction in place
864 would have. Exactly which responsibilities fall to the copy and
865 which fall to the fixup is up to the author of those functions.
866
867 See the comments in gdbarch.sh for details.
868
869 Note that displaced stepping and software single-step cannot
870 currently be used in combination, although with some care I think
871 they could be made to. Software single-step works by placing
872 breakpoints on all possible subsequent instructions; if the
873 displaced instruction is a PC-relative jump, those breakpoints
874 could fall in very strange places --- on pages that aren't
875 executable, or at addresses that are not proper instruction
876 boundaries. (We do generally let other threads run while we wait
877 to hit the software single-step breakpoint, and they might
878 encounter such a corrupted instruction.) One way to work around
879 this would be to have gdbarch_displaced_step_copy_insn fully
880 simulate the effect of PC-relative instructions (and return NULL)
881 on architectures that use software single-stepping.
882
883 In non-stop mode, we can have independent and simultaneous step
884 requests, so more than one thread may need to simultaneously step
885 over a breakpoint. The current implementation assumes there is
886 only one scratch space per process. In this case, we have to
887 serialize access to the scratch space. If thread A wants to step
888 over a breakpoint, but we are currently waiting for some other
889 thread to complete a displaced step, we leave thread A stopped and
890 place it in the displaced_step_request_queue. Whenever a displaced
891 step finishes, we pick the next thread in the queue and start a new
892 displaced step operation on it. See displaced_step_prepare and
893 displaced_step_fixup for details. */
894
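/* Condensed, illustrative sketch of the n1-n4 sequence above,
   expressed with the gdbarch methods it relies on.  This is not the
   actual code; the real logic, with queueing, cleanups and error
   handling, lives in displaced_step_prepare and displaced_step_fixup
   below:

     copy = gdbarch_displaced_step_location (gdbarch);         -- n1
     len = gdbarch_max_insn_length (gdbarch);
     read_memory (copy, saved_bytes, len);                      -- save scratch area
     closure = gdbarch_displaced_step_copy_insn (gdbarch,
                                                 original, copy, regcache);
     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, TARGET_SIGNAL_0);                  -- n2: step the copy
     ... wait for the single-step to finish ...
     write_memory (copy, saved_bytes, len);                     -- restore scratch area
     gdbarch_displaced_step_fixup (gdbarch, closure,            -- n3
                                   original, copy, regcache);
     gdbarch_displaced_step_free_closure (gdbarch, closure);
     target_resume (ptid, 0, stop_signal);                      -- n4: let the thread go  */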
895 /* If this is not null_ptid, this is the thread carrying out a
896 displaced single-step. This thread's state will require fixing up
897 once it has completed its step. */
898 static ptid_t displaced_step_ptid;
899
900 struct displaced_step_request
901 {
902 ptid_t ptid;
903 struct displaced_step_request *next;
904 };
905
906 /* A queue of pending displaced stepping requests. */
907 struct displaced_step_request *displaced_step_request_queue;
908
909 /* The architecture the thread had when we stepped it. */
910 static struct gdbarch *displaced_step_gdbarch;
911
912 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
913 for post-step cleanup. */
914 static struct displaced_step_closure *displaced_step_closure;
915
916 /* The address of the original instruction, and the copy we made. */
917 static CORE_ADDR displaced_step_original, displaced_step_copy;
918
919 /* Saved contents of copy area. */
920 static gdb_byte *displaced_step_saved_copy;
921
922 /* Enum strings for "set|show displaced-stepping". */
923
924 static const char can_use_displaced_stepping_auto[] = "auto";
925 static const char can_use_displaced_stepping_on[] = "on";
926 static const char can_use_displaced_stepping_off[] = "off";
927 static const char *can_use_displaced_stepping_enum[] =
928 {
929 can_use_displaced_stepping_auto,
930 can_use_displaced_stepping_on,
931 can_use_displaced_stepping_off,
932 NULL,
933 };
934
935 /* If ON, and the architecture supports it, GDB will use displaced
936 stepping to step over breakpoints. If OFF, or if the architecture
937 doesn't support it, GDB will instead use the traditional
938 hold-and-step approach. If AUTO (which is the default), GDB will
939 decide which technique to use to step over breakpoints depending on
940 which of all-stop or non-stop mode is active --- displaced stepping
941 in non-stop mode; hold-and-step in all-stop mode. */
942
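/* For reference (illustrative note): this variable is exposed to the
   user through "set|show displaced-stepping", registered in
   _initialize_infrun at the end of this file, e.g.:

     (gdb) set displaced-stepping on
     (gdb) show displaced-stepping  */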
943 static const char *can_use_displaced_stepping =
944 can_use_displaced_stepping_auto;
945
946 static void
947 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
948 struct cmd_list_element *c,
949 const char *value)
950 {
951 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
952 fprintf_filtered (file, _("\
953 Debugger's willingness to use displaced stepping to step over \
954 breakpoints is %s (currently %s).\n"),
955 value, non_stop ? "on" : "off");
956 else
957 fprintf_filtered (file, _("\
958 Debugger's willingness to use displaced stepping to step over \
959 breakpoints is %s.\n"), value);
960 }
961
962 /* Return non-zero if displaced stepping can/should be used to step
963 over breakpoints. */
964
965 static int
966 use_displaced_stepping (struct gdbarch *gdbarch)
967 {
968 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
969 && non_stop)
970 || can_use_displaced_stepping == can_use_displaced_stepping_on)
971 && gdbarch_displaced_step_copy_insn_p (gdbarch)
972 && !RECORD_IS_USED);
973 }
974
975 /* Clean out any stray displaced stepping state. */
976 static void
977 displaced_step_clear (void)
978 {
979 /* Indicate that there is no cleanup pending. */
980 displaced_step_ptid = null_ptid;
981
982 if (displaced_step_closure)
983 {
984 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
985 displaced_step_closure);
986 displaced_step_closure = NULL;
987 }
988 }
989
990 static void
991 displaced_step_clear_cleanup (void *ignore)
992 {
993 displaced_step_clear ();
994 }
995
996 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
997 void
998 displaced_step_dump_bytes (struct ui_file *file,
999 const gdb_byte *buf,
1000 size_t len)
1001 {
1002 int i;
1003
1004 for (i = 0; i < len; i++)
1005 fprintf_unfiltered (file, "%02x ", buf[i]);
1006 fputs_unfiltered ("\n", file);
1007 }
1008
1009 /* Prepare to single-step, using displaced stepping.
1010
1011 Note that we cannot use displaced stepping when we have a signal to
1012 deliver. If we have a signal to deliver and an instruction to step
1013 over, then after the step, there will be no indication from the
1014 target whether the thread entered a signal handler or ignored the
1015 signal and stepped over the instruction successfully --- both cases
1016 result in a simple SIGTRAP. In the first case we mustn't do a
1017 fixup, and in the second case we must --- but we can't tell which.
1018 Comments in the code for 'random signals' in handle_inferior_event
1019 explain how we handle this case instead.
1020
1021 Returns 1 if preparing was successful -- this thread is going to be
1022 stepped now; or 0 if displaced stepping this thread got queued. */
1023 static int
1024 displaced_step_prepare (ptid_t ptid)
1025 {
1026 struct cleanup *old_cleanups, *ignore_cleanups;
1027 struct regcache *regcache = get_thread_regcache (ptid);
1028 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1029 CORE_ADDR original, copy;
1030 ULONGEST len;
1031 struct displaced_step_closure *closure;
1032
1033 /* We should never reach this function if the architecture does not
1034 support displaced stepping. */
1035 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1036
1037 /* For the first cut, we're displaced stepping one thread at a
1038 time. */
1039
1040 if (!ptid_equal (displaced_step_ptid, null_ptid))
1041 {
1042 /* Already waiting for a displaced step to finish. Defer this
1043 request and place it in the queue. */
1044 struct displaced_step_request *req, *new_req;
1045
1046 if (debug_displaced)
1047 fprintf_unfiltered (gdb_stdlog,
1048 "displaced: defering step of %s\n",
1049 target_pid_to_str (ptid));
1050
1051 new_req = xmalloc (sizeof (*new_req));
1052 new_req->ptid = ptid;
1053 new_req->next = NULL;
1054
1055 if (displaced_step_request_queue)
1056 {
1057 for (req = displaced_step_request_queue;
1058 req && req->next;
1059 req = req->next)
1060 ;
1061 req->next = new_req;
1062 }
1063 else
1064 displaced_step_request_queue = new_req;
1065
1066 return 0;
1067 }
1068 else
1069 {
1070 if (debug_displaced)
1071 fprintf_unfiltered (gdb_stdlog,
1072 "displaced: stepping %s now\n",
1073 target_pid_to_str (ptid));
1074 }
1075
1076 displaced_step_clear ();
1077
1078 old_cleanups = save_inferior_ptid ();
1079 inferior_ptid = ptid;
1080
1081 original = regcache_read_pc (regcache);
1082
1083 copy = gdbarch_displaced_step_location (gdbarch);
1084 len = gdbarch_max_insn_length (gdbarch);
1085
1086 /* Save the original contents of the copy area. */
1087 displaced_step_saved_copy = xmalloc (len);
1088 ignore_cleanups = make_cleanup (free_current_contents,
1089 &displaced_step_saved_copy);
1090 read_memory (copy, displaced_step_saved_copy, len);
1091 if (debug_displaced)
1092 {
1093 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1094 paddress (gdbarch, copy));
1095 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
1096 }
1097
1098 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1099 original, copy, regcache);
1100
1101 /* We don't support the fully-simulated case at present. */
1102 gdb_assert (closure);
1103
1104 /* Save the information we need to fix things up if the step
1105 succeeds. */
1106 displaced_step_ptid = ptid;
1107 displaced_step_gdbarch = gdbarch;
1108 displaced_step_closure = closure;
1109 displaced_step_original = original;
1110 displaced_step_copy = copy;
1111
1112 make_cleanup (displaced_step_clear_cleanup, 0);
1113
1114 /* Resume execution at the copy. */
1115 regcache_write_pc (regcache, copy);
1116
1117 discard_cleanups (ignore_cleanups);
1118
1119 do_cleanups (old_cleanups);
1120
1121 if (debug_displaced)
1122 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1123 paddress (gdbarch, copy));
1124
1125 return 1;
1126 }
1127
1128 static void
1129 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1130 {
1131 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1132 inferior_ptid = ptid;
1133 write_memory (memaddr, myaddr, len);
1134 do_cleanups (ptid_cleanup);
1135 }
1136
1137 static void
1138 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1139 {
1140 struct cleanup *old_cleanups;
1141
1142 /* Was this event for the pid we displaced? */
1143 if (ptid_equal (displaced_step_ptid, null_ptid)
1144 || ! ptid_equal (displaced_step_ptid, event_ptid))
1145 return;
1146
1147 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
1148
1149 /* Restore the contents of the copy area. */
1150 {
1151 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
1152 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
1153 displaced_step_saved_copy, len);
1154 if (debug_displaced)
1155 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1156 paddress (displaced_step_gdbarch,
1157 displaced_step_copy));
1158 }
1159
1160 /* Did the instruction complete successfully? */
1161 if (signal == TARGET_SIGNAL_TRAP)
1162 {
1163 /* Fix up the resulting state. */
1164 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
1165 displaced_step_closure,
1166 displaced_step_original,
1167 displaced_step_copy,
1168 get_thread_regcache (displaced_step_ptid));
1169 }
1170 else
1171 {
1172 /* Since the instruction didn't complete, all we can do is
1173 relocate the PC. */
1174 struct regcache *regcache = get_thread_regcache (event_ptid);
1175 CORE_ADDR pc = regcache_read_pc (regcache);
1176 pc = displaced_step_original + (pc - displaced_step_copy);
1177 regcache_write_pc (regcache, pc);
1178 }
1179
1180 do_cleanups (old_cleanups);
1181
1182 displaced_step_ptid = null_ptid;
1183
1184 /* Are there any pending displaced stepping requests? If so, run
1185 one now. */
1186 while (displaced_step_request_queue)
1187 {
1188 struct displaced_step_request *head;
1189 ptid_t ptid;
1190 struct regcache *regcache;
1191 struct gdbarch *gdbarch;
1192 CORE_ADDR actual_pc;
1193 struct address_space *aspace;
1194
1195 head = displaced_step_request_queue;
1196 ptid = head->ptid;
1197 displaced_step_request_queue = head->next;
1198 xfree (head);
1199
1200 context_switch (ptid);
1201
1202 regcache = get_thread_regcache (ptid);
1203 actual_pc = regcache_read_pc (regcache);
1204 aspace = get_regcache_aspace (regcache);
1205
1206 if (breakpoint_here_p (aspace, actual_pc))
1207 {
1208 if (debug_displaced)
1209 fprintf_unfiltered (gdb_stdlog,
1210 "displaced: stepping queued %s now\n",
1211 target_pid_to_str (ptid));
1212
1213 displaced_step_prepare (ptid);
1214
1215 gdbarch = get_regcache_arch (regcache);
1216
1217 if (debug_displaced)
1218 {
1219 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1220 gdb_byte buf[4];
1221
1222 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1223 paddress (gdbarch, actual_pc));
1224 read_memory (actual_pc, buf, sizeof (buf));
1225 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1226 }
1227
1228 if (gdbarch_displaced_step_hw_singlestep
1229 (gdbarch, displaced_step_closure))
1230 target_resume (ptid, 1, TARGET_SIGNAL_0);
1231 else
1232 target_resume (ptid, 0, TARGET_SIGNAL_0);
1233
1234 /* Done, we're stepping a thread. */
1235 break;
1236 }
1237 else
1238 {
1239 int step;
1240 struct thread_info *tp = inferior_thread ();
1241
1242 /* The breakpoint we were sitting under has since been
1243 removed. */
1244 tp->trap_expected = 0;
1245
1246 /* Go back to what we were trying to do. */
1247 step = currently_stepping (tp);
1248
1249 if (debug_displaced)
1250 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1251 target_pid_to_str (tp->ptid), step);
1252
1253 target_resume (ptid, step, TARGET_SIGNAL_0);
1254 tp->stop_signal = TARGET_SIGNAL_0;
1255
1256 /* This request was discarded. See if there's any other
1257 thread waiting for its turn. */
1258 }
1259 }
1260 }
1261
1262 /* Update global variables holding ptids to hold NEW_PTID if they were
1263 holding OLD_PTID. */
1264 static void
1265 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1266 {
1267 struct displaced_step_request *it;
1268
1269 if (ptid_equal (inferior_ptid, old_ptid))
1270 inferior_ptid = new_ptid;
1271
1272 if (ptid_equal (singlestep_ptid, old_ptid))
1273 singlestep_ptid = new_ptid;
1274
1275 if (ptid_equal (displaced_step_ptid, old_ptid))
1276 displaced_step_ptid = new_ptid;
1277
1278 if (ptid_equal (deferred_step_ptid, old_ptid))
1279 deferred_step_ptid = new_ptid;
1280
1281 for (it = displaced_step_request_queue; it; it = it->next)
1282 if (ptid_equal (it->ptid, old_ptid))
1283 it->ptid = new_ptid;
1284 }
1285
1286 \f
1287 /* Resuming. */
1288
1289 /* Things to clean up if we QUIT out of resume (). */
1290 static void
1291 resume_cleanups (void *ignore)
1292 {
1293 normal_stop ();
1294 }
1295
1296 static const char schedlock_off[] = "off";
1297 static const char schedlock_on[] = "on";
1298 static const char schedlock_step[] = "step";
1299 static const char *scheduler_enums[] = {
1300 schedlock_off,
1301 schedlock_on,
1302 schedlock_step,
1303 NULL
1304 };
1305 static const char *scheduler_mode = schedlock_off;
1306 static void
1307 show_scheduler_mode (struct ui_file *file, int from_tty,
1308 struct cmd_list_element *c, const char *value)
1309 {
1310 fprintf_filtered (file, _("\
1311 Mode for locking scheduler during execution is \"%s\".\n"),
1312 value);
1313 }
1314
1315 static void
1316 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1317 {
1318 if (!target_can_lock_scheduler)
1319 {
1320 scheduler_mode = schedlock_off;
1321 error (_("Target '%s' cannot support this command."), target_shortname);
1322 }
1323 }
1324
1325 /* True if execution commands resume all threads of all processes by
1326 default; otherwise, resume only threads of the current inferior
1327 process. */
1328 int sched_multi = 0;
1329
1330 /* Try to set up software single stepping over the specified location.
1331 Return 1 if target_resume() should use hardware single step.
1332
1333 GDBARCH the current gdbarch.
1334 PC the location to step over. */
1335
1336 static int
1337 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1338 {
1339 int hw_step = 1;
1340
1341 if (gdbarch_software_single_step_p (gdbarch)
1342 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1343 {
1344 hw_step = 0;
1345 /* Do not pull these breakpoints until after a `wait' in
1346 `wait_for_inferior' */
1347 singlestep_breakpoints_inserted_p = 1;
1348 singlestep_ptid = inferior_ptid;
1349 singlestep_pc = pc;
1350 }
1351 return hw_step;
1352 }
1353
1354 /* Resume the inferior, but allow a QUIT. This is useful if the user
1355 wants to interrupt some lengthy single-stepping operation
1356 (for child processes, the SIGINT goes to the inferior, and so
1357 we get a SIGINT random_signal, but for remote debugging and perhaps
1358 other targets, that's not true).
1359
1360 STEP nonzero if we should step (zero to continue instead).
1361 SIG is the signal to give the inferior (zero for none). */
1362 void
1363 resume (int step, enum target_signal sig)
1364 {
1365 int should_resume = 1;
1366 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1367 struct regcache *regcache = get_current_regcache ();
1368 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1369 struct thread_info *tp = inferior_thread ();
1370 CORE_ADDR pc = regcache_read_pc (regcache);
1371 struct address_space *aspace = get_regcache_aspace (regcache);
1372
1373 QUIT;
1374
1375 if (debug_infrun)
1376 fprintf_unfiltered (gdb_stdlog,
1377 "infrun: resume (step=%d, signal=%d), "
1378 "trap_expected=%d\n",
1379 step, sig, tp->trap_expected);
1380
1381 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1382 over an instruction that causes a page fault without triggering
1383 a hardware watchpoint. The kernel properly notices that it shouldn't
1384 stop, because the hardware watchpoint is not triggered, but it forgets
1385 the step request and continues the program normally.
1386 Work around the problem by removing hardware watchpoints if a step is
1387 requested; GDB will check for a hardware watchpoint trigger after the
1388 step anyway. */
1389 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1390 remove_hw_watchpoints ();
1391
1392
1393 /* Normally, by the time we reach `resume', the breakpoints are either
1394 removed or inserted, as appropriate. The exception is if we're sitting
1395 at a permanent breakpoint; we need to step over it, but permanent
1396 breakpoints can't be removed. So we have to test for it here. */
1397 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1398 {
1399 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1400 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1401 else
1402 error (_("\
1403 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1404 how to step past a permanent breakpoint on this architecture. Try using\n\
1405 a command like `return' or `jump' to continue execution."));
1406 }
1407
1408 /* If enabled, step over breakpoints by executing a copy of the
1409 instruction at a different address.
1410
1411 We can't use displaced stepping when we have a signal to deliver;
1412 the comments for displaced_step_prepare explain why. The
1413 comments in the handle_inferior event for dealing with 'random
1414 signals' explain what we do instead. */
1415 if (use_displaced_stepping (gdbarch)
1416 && (tp->trap_expected
1417 || (step && gdbarch_software_single_step_p (gdbarch)))
1418 && sig == TARGET_SIGNAL_0)
1419 {
1420 if (!displaced_step_prepare (inferior_ptid))
1421 {
1422 /* Got placed in displaced stepping queue. Will be resumed
1423 later when all the currently queued displaced stepping
1424 requests finish. The thread is not executing at this point,
1425 and the call to set_executing will be made later. But we
1426 need to call set_running here, since from the frontend's point of view,
1427 the thread is running. */
1428 set_running (inferior_ptid, 1);
1429 discard_cleanups (old_cleanups);
1430 return;
1431 }
1432
1433 step = gdbarch_displaced_step_hw_singlestep
1434 (gdbarch, displaced_step_closure);
1435 }
1436
1437 /* Do we need to do it the hard way, w/temp breakpoints? */
1438 else if (step)
1439 step = maybe_software_singlestep (gdbarch, pc);
1440
1441 if (should_resume)
1442 {
1443 ptid_t resume_ptid;
1444
1445 /* If STEP is set, it's a request to use hardware stepping
1446 facilities. But in that case, we should never
1447 use a singlestep breakpoint. */
1448 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1449
1450 /* Decide the set of threads to ask the target to resume. Start
1451 by assuming everything will be resumed, then narrow the set
1452 by applying increasingly restrictive conditions. */
1453
1454 /* By default, resume all threads of all processes. */
1455 resume_ptid = RESUME_ALL;
1456
1457 /* Maybe resume only all threads of the current process. */
1458 if (!sched_multi && target_supports_multi_process ())
1459 {
1460 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1461 }
1462
1463 /* Maybe resume a single thread after all. */
1464 if (singlestep_breakpoints_inserted_p
1465 && stepping_past_singlestep_breakpoint)
1466 {
1467 /* The situation here is as follows. In thread T1 we wanted to
1468 single-step. Lacking hardware single-stepping we've
1469 set a breakpoint at the PC of the next instruction -- call it
1470 P. After resuming, we've hit that breakpoint in thread T2.
1471 Now we've removed the original breakpoint, inserted a breakpoint
1472 at P+1, and are trying to step to advance T2 past the breakpoint.
1473 We need to step only T2, as if T1 is allowed to freely run,
1474 it can run past P, and if other threads are allowed to run,
1475 they can hit the breakpoint at P+1, and nested hits of single-step
1476 breakpoints are not something we'd want -- that's complicated
1477 to support, and has no value. */
1478 resume_ptid = inferior_ptid;
1479 }
1480 else if ((step || singlestep_breakpoints_inserted_p)
1481 && tp->trap_expected)
1482 {
1483 /* We're allowing a thread to run past a breakpoint it has
1484 hit, by single-stepping the thread with the breakpoint
1485 removed. In that case, we need to single-step only this
1486 thread, and keep others stopped, as they can miss this
1487 breakpoint if allowed to run.
1488
1489 The current code actually removes all breakpoints when
1490 doing this, not just the one being stepped over, so if we
1491 let other threads run, we can actually miss any
1492 breakpoint, not just the one at PC. */
1493 resume_ptid = inferior_ptid;
1494 }
1495 else if (non_stop)
1496 {
1497 /* With non-stop mode on, threads are always handled
1498 individually. */
1499 resume_ptid = inferior_ptid;
1500 }
1501 else if ((scheduler_mode == schedlock_on)
1502 || (scheduler_mode == schedlock_step
1503 && (step || singlestep_breakpoints_inserted_p)))
1504 {
1505 /* User-settable 'scheduler' mode requires solo thread resume. */
1506 resume_ptid = inferior_ptid;
1507 }
1508
1509 if (gdbarch_cannot_step_breakpoint (gdbarch))
1510 {
1511 /* Most targets can step a breakpoint instruction, thus
1512 executing it normally. But if this one cannot, just
1513 continue and we will hit it anyway. */
1514 if (step && breakpoint_inserted_here_p (aspace, pc))
1515 step = 0;
1516 }
1517
1518 if (debug_displaced
1519 && use_displaced_stepping (gdbarch)
1520 && tp->trap_expected)
1521 {
1522 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1523 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1524 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1525 gdb_byte buf[4];
1526
1527 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1528 paddress (resume_gdbarch, actual_pc));
1529 read_memory (actual_pc, buf, sizeof (buf));
1530 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1531 }
1532
1533 /* Install inferior's terminal modes. */
1534 target_terminal_inferior ();
1535
1536 /* Avoid confusing the next resume, if the next stop/resume
1537 happens to apply to another thread. */
1538 tp->stop_signal = TARGET_SIGNAL_0;
1539
1540 target_resume (resume_ptid, step, sig);
1541 }
1542
1543 discard_cleanups (old_cleanups);
1544 }
1545 \f
1546 /* Proceeding. */
1547
1548 /* Clear out all variables saying what to do when the inferior is continued.
1549 First do this, then set the ones you want, then call `proceed'. */
1550
1551 static void
1552 clear_proceed_status_thread (struct thread_info *tp)
1553 {
1554 if (debug_infrun)
1555 fprintf_unfiltered (gdb_stdlog,
1556 "infrun: clear_proceed_status_thread (%s)\n",
1557 target_pid_to_str (tp->ptid));
1558
1559 tp->trap_expected = 0;
1560 tp->step_range_start = 0;
1561 tp->step_range_end = 0;
1562 tp->step_frame_id = null_frame_id;
1563 tp->step_stack_frame_id = null_frame_id;
1564 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1565 tp->stop_requested = 0;
1566
1567 tp->stop_step = 0;
1568
1569 tp->proceed_to_finish = 0;
1570
1571 /* Discard any remaining commands or status from previous stop. */
1572 bpstat_clear (&tp->stop_bpstat);
1573 }
1574
1575 static int
1576 clear_proceed_status_callback (struct thread_info *tp, void *data)
1577 {
1578 if (is_exited (tp->ptid))
1579 return 0;
1580
1581 clear_proceed_status_thread (tp);
1582 return 0;
1583 }
1584
1585 void
1586 clear_proceed_status (void)
1587 {
1588 if (!non_stop)
1589 {
1590 /* In all-stop mode, delete the per-thread status of all
1591 threads. Even if inferior_ptid is null_ptid, there may be
1592 threads on the list. E.g., we may be launching a new
1593 process while selecting the executable. */
1594 iterate_over_threads (clear_proceed_status_callback, NULL);
1595 }
1596
1597 if (!ptid_equal (inferior_ptid, null_ptid))
1598 {
1599 struct inferior *inferior;
1600
1601 if (non_stop)
1602 {
1603 /* If in non-stop mode, only delete the per-thread status of
1604 the current thread. */
1605 clear_proceed_status_thread (inferior_thread ());
1606 }
1607
1608 inferior = current_inferior ();
1609 inferior->stop_soon = NO_STOP_QUIETLY;
1610 }
1611
1612 stop_after_trap = 0;
1613
1614 observer_notify_about_to_proceed ();
1615
1616 if (stop_registers)
1617 {
1618 regcache_xfree (stop_registers);
1619 stop_registers = NULL;
1620 }
1621 }
1622
1623 /* Check the current thread against the thread that reported the most recent
1624 event. If a step-over is required return TRUE and set the current thread
1625 to the old thread. Otherwise return FALSE.
1626
1627 This should be suitable for any targets that support threads. */
1628
1629 static int
1630 prepare_to_proceed (int step)
1631 {
1632 ptid_t wait_ptid;
1633 struct target_waitstatus wait_status;
1634 int schedlock_enabled;
1635
1636 /* With non-stop mode on, threads are always handled individually. */
1637 gdb_assert (! non_stop);
1638
1639 /* Get the last target status returned by target_wait(). */
1640 get_last_target_status (&wait_ptid, &wait_status);
1641
1642 /* Make sure we were stopped at a breakpoint. */
1643 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1644 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1645 && wait_status.value.sig != TARGET_SIGNAL_ILL
1646 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1647 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1648 {
1649 return 0;
1650 }
1651
1652 schedlock_enabled = (scheduler_mode == schedlock_on
1653 || (scheduler_mode == schedlock_step
1654 && step));
1655
1656 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1657 if (schedlock_enabled)
1658 return 0;
1659
1660 /* Don't switch over if we're about to resume some other process
1661 other than WAIT_PTID's, and schedule-multiple is off. */
1662 if (!sched_multi
1663 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1664 return 0;
1665
1666 /* Switched over from WAIT_PTID. */
1667 if (!ptid_equal (wait_ptid, minus_one_ptid)
1668 && !ptid_equal (inferior_ptid, wait_ptid))
1669 {
1670 struct regcache *regcache = get_thread_regcache (wait_ptid);
1671
1672 if (breakpoint_here_p (get_regcache_aspace (regcache),
1673 regcache_read_pc (regcache)))
1674 {
1675 /* If stepping, remember current thread to switch back to. */
1676 if (step)
1677 deferred_step_ptid = inferior_ptid;
1678
1679 /* Switch back to the WAIT_PTID thread. */
1680 switch_to_thread (wait_ptid);
1681
1682 /* We return 1 to indicate that there is a breakpoint here,
1683 so we need to step over it before continuing to avoid
1684 hitting it straight away. */
1685 return 1;
1686 }
1687 }
1688
1689 return 0;
1690 }
1691
1692 /* Basic routine for continuing the program in various fashions.
1693
1694 ADDR is the address to resume at, or -1 for resume where stopped.
1695 SIGGNAL is the signal to give it, or 0 for none,
1696 or -1 for act according to how it stopped.
1697 STEP is nonzero if we should trap after one instruction.
1698 -1 means return after that and print nothing.
1699 You should probably set various step_... variables
1700 before calling here, if you are stepping.
1701
1702 You should call clear_proceed_status before calling proceed. */
1703
1704 void
1705 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1706 {
1707 struct regcache *regcache;
1708 struct gdbarch *gdbarch;
1709 struct thread_info *tp;
1710 CORE_ADDR pc;
1711 struct address_space *aspace;
1712 int oneproc = 0;
1713
1714 /* If we're stopped at a fork/vfork, follow the branch set by the
1715 "set follow-fork-mode" command; otherwise, we'll just proceed
1716 resuming the current thread. */
1717 if (!follow_fork ())
1718 {
1719 /* The target for some reason decided not to resume. */
1720 normal_stop ();
1721 return;
1722 }
1723
1724 regcache = get_current_regcache ();
1725 gdbarch = get_regcache_arch (regcache);
1726 aspace = get_regcache_aspace (regcache);
1727 pc = regcache_read_pc (regcache);
1728
1729 if (step > 0)
1730 step_start_function = find_pc_function (pc);
1731 if (step < 0)
1732 stop_after_trap = 1;
1733
1734 if (addr == (CORE_ADDR) -1)
1735 {
1736 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1737 && execution_direction != EXEC_REVERSE)
1738 /* There is a breakpoint at the address we will resume at,
1739 step one instruction before inserting breakpoints so that
1740 we do not stop right away (and report a second hit at this
1741 breakpoint).
1742
1743 Note, we don't do this in reverse, because we won't
1744 actually be executing the breakpoint insn anyway.
1745 We'll be (un-)executing the previous instruction. */
1746
1747 oneproc = 1;
1748 else if (gdbarch_single_step_through_delay_p (gdbarch)
1749 && gdbarch_single_step_through_delay (gdbarch,
1750 get_current_frame ()))
1751 /* We stepped onto an instruction that needs to be stepped
1752 again before re-inserting the breakpoint, do so. */
1753 oneproc = 1;
1754 }
1755 else
1756 {
1757 regcache_write_pc (regcache, addr);
1758 }
1759
1760 if (debug_infrun)
1761 fprintf_unfiltered (gdb_stdlog,
1762 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1763 paddress (gdbarch, addr), siggnal, step);
1764
1765 /* We're handling a live event, so make sure we're doing live
1766 debugging. If we're looking at traceframes while the target is
1767 running, we're going to need to get back to that mode after
1768 handling the event. */
1769 if (non_stop)
1770 {
1771 make_cleanup_restore_current_traceframe ();
1772 set_traceframe_number (-1);
1773 }
1774
1775 if (non_stop)
1776 /* In non-stop, each thread is handled individually. The context
1777 must already be set to the right thread here. */
1778 ;
1779 else
1780 {
1781 /* In a multi-threaded task we may select another thread and
1782 then continue or step.
1783
1784 But if the old thread was stopped at a breakpoint, it will
1785 immediately cause another breakpoint stop without any
1786 execution (i.e. it will report a breakpoint hit incorrectly).
1787 So we must step over it first.
1788
1789 prepare_to_proceed checks the current thread against the
1790 thread that reported the most recent event. If a step-over
1791 is required it returns TRUE and sets the current thread to
1792 the old thread. */
1793 if (prepare_to_proceed (step))
1794 oneproc = 1;
1795 }
1796
1797 /* prepare_to_proceed may change the current thread. */
1798 tp = inferior_thread ();
1799
1800 if (oneproc)
1801 {
1802 tp->trap_expected = 1;
1803 /* If displaced stepping is enabled, we can step over the
1804 breakpoint without hitting it, so leave all breakpoints
1805 inserted. Otherwise we need to disable all breakpoints, step
1806 one instruction, and then re-add them when that step is
1807 finished. */
1808 if (!use_displaced_stepping (gdbarch))
1809 remove_breakpoints ();
1810 }
1811
1812 /* We can insert breakpoints if we're not trying to step over one,
1813 or if we are stepping over one but we're using displaced stepping
1814 to do so. */
1815 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1816 insert_breakpoints ();
1817
1818 if (!non_stop)
1819 {
1820 /* Pass the last stop signal to the thread we're resuming,
1821 irrespective of whether the current thread is the thread that
1822 got the last event or not. This was historically GDB's
1823 behaviour before keeping a stop_signal per thread. */
1824
1825 struct thread_info *last_thread;
1826 ptid_t last_ptid;
1827 struct target_waitstatus last_status;
1828
1829 get_last_target_status (&last_ptid, &last_status);
1830 if (!ptid_equal (inferior_ptid, last_ptid)
1831 && !ptid_equal (last_ptid, null_ptid)
1832 && !ptid_equal (last_ptid, minus_one_ptid))
1833 {
1834 last_thread = find_thread_ptid (last_ptid);
1835 if (last_thread)
1836 {
1837 tp->stop_signal = last_thread->stop_signal;
1838 last_thread->stop_signal = TARGET_SIGNAL_0;
1839 }
1840 }
1841 }
1842
1843 if (siggnal != TARGET_SIGNAL_DEFAULT)
1844 tp->stop_signal = siggnal;
1845 /* If this signal should not be seen by the program,
1846 give it zero. Used for debugging signals. */
1847 else if (!signal_program[tp->stop_signal])
1848 tp->stop_signal = TARGET_SIGNAL_0;
1849
1850 annotate_starting ();
1851
1852 /* Make sure that output from GDB appears before output from the
1853 inferior. */
1854 gdb_flush (gdb_stdout);
1855
1856 /* Refresh prev_pc value just prior to resuming. This used to be
1857 done in stop_stepping, however, setting prev_pc there did not handle
1858 scenarios such as inferior function calls or returning from
1859 a function via the return command. In those cases, the prev_pc
1860 value was not set properly for subsequent commands. The prev_pc value
1861 is used to initialize the starting line number in the ecs. With an
1862 invalid value, the gdb next command ends up stopping at the position
1863 represented by the next line table entry past our start position.
1864 On platforms that generate one line table entry per line, this
1865 is not a problem. However, on the ia64, the compiler generates
1866 extraneous line table entries that do not increase the line number.
1867 When we issue the gdb next command on the ia64 after an inferior call
1868 or a return command, we often end up a few instructions forward, still
1869 within the original line we started.
1870
1871 An attempt was made to refresh the prev_pc at the same time the
1872 execution_control_state is initialized (for instance, just before
1873 waiting for an inferior event). But this approach did not work
1874 because of platforms that use ptrace, where the pc register cannot
1875 be read unless the inferior is stopped. At that point, we are not
1876 guaranteed the inferior is stopped and so the regcache_read_pc() call
1877 can fail. Setting the prev_pc value here ensures the value is updated
1878 correctly when the inferior is stopped. */
1879 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1880
1881 /* Fill in with reasonable starting values. */
1882 init_thread_stepping_state (tp);
1883
1884 /* Reset to normal state. */
1885 init_infwait_state ();
1886
1887 /* Resume inferior. */
1888 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1889
1890 /* Wait for it to stop (if not standalone)
1891 and in any case decode why it stopped, and act accordingly. */
1892 /* Do this only if we are not using the event loop, or if the target
1893 does not support asynchronous execution. */
1894 if (!target_can_async_p ())
1895 {
1896 wait_for_inferior (0);
1897 normal_stop ();
1898 }
1899 }
1900 \f
1901
1902 /* Start remote-debugging of a machine over a serial link. */
1903
1904 void
1905 start_remote (int from_tty)
1906 {
1907 struct inferior *inferior;
1908 init_wait_for_inferior ();
1909
1910 inferior = current_inferior ();
1911 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1912
1913 /* Always go on waiting for the target, regardless of the mode. */
1914 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1915 indicate to wait_for_inferior that a target should timeout if
1916 nothing is returned (instead of just blocking). Because of this,
1917 targets expecting an immediate response need to, internally, set
1918 things up so that the target_wait() is forced to eventually
1919 timeout. */
1920 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1921 differentiate to its caller what the state of the target is after
1922 the initial open has been performed. Here we're assuming that
1923 the target has stopped. It should be possible to eventually have
1924 target_open() return to the caller an indication that the target
1925 is currently running and GDB state should be set to the same as
1926 for an async run. */
1927 wait_for_inferior (0);
1928
1929 /* Now that the inferior has stopped, do any bookkeeping like
1930 loading shared libraries. We want to do this before normal_stop,
1931 so that the displayed frame is up to date. */
1932 post_create_inferior (&current_target, from_tty);
1933
1934 normal_stop ();
1935 }
1936
1937 /* Initialize static vars when a new inferior begins. */
1938
1939 void
1940 init_wait_for_inferior (void)
1941 {
1942 /* These are meaningless until the first time through wait_for_inferior. */
1943
1944 breakpoint_init_inferior (inf_starting);
1945
1946 clear_proceed_status ();
1947
1948 stepping_past_singlestep_breakpoint = 0;
1949 deferred_step_ptid = null_ptid;
1950
1951 target_last_wait_ptid = minus_one_ptid;
1952
1953 previous_inferior_ptid = null_ptid;
1954 init_infwait_state ();
1955
1956 displaced_step_clear ();
1957
1958 /* Discard any skipped inlined frames. */
1959 clear_inline_frame_state (minus_one_ptid);
1960 }
1961
1962 \f
1963 /* This enum encodes possible reasons for doing a target_wait, so that
1964 wfi can call target_wait in one place. (Ultimately the call will be
1965 moved out of the infinite loop entirely.) */
1966
1967 enum infwait_states
1968 {
1969 infwait_normal_state,
1970 infwait_thread_hop_state,
1971 infwait_step_watch_state,
1972 infwait_nonstep_watch_state
1973 };
1974
1975 /* Why did the inferior stop? Used to print the appropriate messages
1976 to the interface from within handle_inferior_event(). */
1977 enum inferior_stop_reason
1978 {
1979 /* Step, next, nexti, stepi finished. */
1980 END_STEPPING_RANGE,
1981 /* Inferior terminated by signal. */
1982 SIGNAL_EXITED,
1983 /* Inferior exited. */
1984 EXITED,
1985 /* Inferior received signal, and user asked to be notified. */
1986 SIGNAL_RECEIVED,
1987 /* Reverse execution -- target ran out of history info. */
1988 NO_HISTORY
1989 };
1990
1991 /* The PTID we'll do a target_wait on. */
1992 ptid_t waiton_ptid;
1993
1994 /* Current inferior wait state. */
1995 enum infwait_states infwait_state;
1996
1997 /* Data to be passed around while handling an event. This data is
1998 discarded between events. */
1999 struct execution_control_state
2000 {
2001 ptid_t ptid;
2002 /* The thread that got the event, if this was a thread event; NULL
2003 otherwise. */
2004 struct thread_info *event_thread;
2005
2006 struct target_waitstatus ws;
2007 int random_signal;
2008 CORE_ADDR stop_func_start;
2009 CORE_ADDR stop_func_end;
2010 char *stop_func_name;
2011 int new_thread_event;
2012 int wait_some_more;
2013 };
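/* Illustrative sketch, not part of the original file: an
   execution_control_state is always zero-initialized and then either
   filled in from target_wait or synthesized by hand, as
   infrun_thread_stop_requested_callback does below.  A minimal
   hand-built "this thread stopped, no signal" event looks like:

     struct execution_control_state ecss, *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = info->ptid;                (the stopped thread)
     ecs->event_thread = find_thread_ptid (ecs->ptid);
     ecs->ws.kind = TARGET_WAITKIND_STOPPED;
     ecs->ws.value.sig = TARGET_SIGNAL_0;
     handle_inferior_event (ecs);
*/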
2014
2015 static void handle_inferior_event (struct execution_control_state *ecs);
2016
2017 static void handle_step_into_function (struct gdbarch *gdbarch,
2018 struct execution_control_state *ecs);
2019 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2020 struct execution_control_state *ecs);
2021 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2022 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2023 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2024 struct symtab_and_line sr_sal,
2025 struct frame_id sr_id);
2026 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2027
2028 static void stop_stepping (struct execution_control_state *ecs);
2029 static void prepare_to_wait (struct execution_control_state *ecs);
2030 static void keep_going (struct execution_control_state *ecs);
2031 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2032 int stop_info);
2033
2034 /* Callback for iterate_over_threads. If the thread is stopped, but
2035 the user/frontend doesn't know about that yet, go through
2036 normal_stop, as if the thread had just stopped now. ARG points at
2037 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2038 ptid_is_pid(PTID) is true, applies to all threads of the process
2039 pointed at by PTID. Otherwise, applies only to the thread pointed to by
2040 PTID. */
2041
2042 static int
2043 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2044 {
2045 ptid_t ptid = * (ptid_t *) arg;
2046
2047 if ((ptid_equal (info->ptid, ptid)
2048 || ptid_equal (minus_one_ptid, ptid)
2049 || (ptid_is_pid (ptid)
2050 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2051 && is_running (info->ptid)
2052 && !is_executing (info->ptid))
2053 {
2054 struct cleanup *old_chain;
2055 struct execution_control_state ecss;
2056 struct execution_control_state *ecs = &ecss;
2057
2058 memset (ecs, 0, sizeof (*ecs));
2059
2060 old_chain = make_cleanup_restore_current_thread ();
2061
2062 switch_to_thread (info->ptid);
2063
2064 /* Go through handle_inferior_event/normal_stop, so we always
2065 have consistent output as if the stop event had been
2066 reported. */
2067 ecs->ptid = info->ptid;
2068 ecs->event_thread = find_thread_ptid (info->ptid);
2069 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2070 ecs->ws.value.sig = TARGET_SIGNAL_0;
2071
2072 handle_inferior_event (ecs);
2073
2074 if (!ecs->wait_some_more)
2075 {
2076 struct thread_info *tp;
2077
2078 normal_stop ();
2079
2080 /* Finish off the continuations. The continuations
2081 themselves are responsible for realising the thread
2082 didn't finish what it was supposed to do. */
2083 tp = inferior_thread ();
2084 do_all_intermediate_continuations_thread (tp);
2085 do_all_continuations_thread (tp);
2086 }
2087
2088 do_cleanups (old_chain);
2089 }
2090
2091 return 0;
2092 }
2093
2094 /* This function is attached as a "thread_stop_requested" observer.
2095 Cleanup local state that assumed the PTID was to be resumed, and
2096 report the stop to the frontend. */
2097
2098 static void
2099 infrun_thread_stop_requested (ptid_t ptid)
2100 {
2101 struct displaced_step_request *it, *next, *prev = NULL;
2102
2103 /* PTID was requested to stop. Remove it from the displaced
2104 stepping queue, so we don't try to resume it automatically. */
2105 for (it = displaced_step_request_queue; it; it = next)
2106 {
2107 next = it->next;
2108
2109 if (ptid_equal (it->ptid, ptid)
2110 || ptid_equal (minus_one_ptid, ptid)
2111 || (ptid_is_pid (ptid)
2112 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
2113 {
2114 if (displaced_step_request_queue == it)
2115 displaced_step_request_queue = it->next;
2116 else
2117 prev->next = it->next;
2118
2119 xfree (it);
2120 }
2121 else
2122 prev = it;
2123 }
2124
2125 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2126 }
2127
2128 static void
2129 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2130 {
2131 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2132 nullify_last_target_wait_ptid ();
2133 }
2134
2135 /* Callback for iterate_over_threads. */
2136
2137 static int
2138 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2139 {
2140 if (is_exited (info->ptid))
2141 return 0;
2142
2143 delete_step_resume_breakpoint (info);
2144 return 0;
2145 }
2146
2147 /* In all-stop, delete the step resume breakpoint of any thread that
2148 had one. In non-stop, delete the step resume breakpoint of the
2149 thread that just stopped. */
2150
2151 static void
2152 delete_step_thread_step_resume_breakpoint (void)
2153 {
2154 if (!target_has_execution
2155 || ptid_equal (inferior_ptid, null_ptid))
2156 /* If the inferior has exited, we have already deleted the step
2157 resume breakpoints out of GDB's lists. */
2158 return;
2159
2160 if (non_stop)
2161 {
2162 /* If in non-stop mode, only delete the step-resume or
2163 longjmp-resume breakpoint of the thread that just stopped
2164 stepping. */
2165 struct thread_info *tp = inferior_thread ();
2166 delete_step_resume_breakpoint (tp);
2167 }
2168 else
2169 /* In all-stop mode, delete all step-resume and longjmp-resume
2170 breakpoints of any thread that had them. */
2171 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2172 }
2173
2174 /* A cleanup wrapper. */
2175
2176 static void
2177 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2178 {
2179 delete_step_thread_step_resume_breakpoint ();
2180 }
2181
2182 /* Pretty print the results of target_wait, for debugging purposes. */
2183
2184 static void
2185 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2186 const struct target_waitstatus *ws)
2187 {
2188 char *status_string = target_waitstatus_to_string (ws);
2189 struct ui_file *tmp_stream = mem_fileopen ();
2190 char *text;
2191
2192 /* The text is split over several lines because it was getting too long.
2193 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2194 output as a unit; we want only one timestamp printed if debug_timestamp
2195 is set. */
2196
2197 fprintf_unfiltered (tmp_stream,
2198 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2199 if (PIDGET (waiton_ptid) != -1)
2200 fprintf_unfiltered (tmp_stream,
2201 " [%s]", target_pid_to_str (waiton_ptid));
2202 fprintf_unfiltered (tmp_stream, ", status) =\n");
2203 fprintf_unfiltered (tmp_stream,
2204 "infrun: %d [%s],\n",
2205 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2206 fprintf_unfiltered (tmp_stream,
2207 "infrun: %s\n",
2208 status_string);
2209
2210 text = ui_file_xstrdup (tmp_stream, NULL);
2211
2212 /* This uses %s in part to handle %'s in the text, but also to avoid
2213 a gcc error: the format attribute requires a string literal. */
2214 fprintf_unfiltered (gdb_stdlog, "%s", text);
2215
2216 xfree (status_string);
2217 xfree (text);
2218 ui_file_delete (tmp_stream);
2219 }
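/* Illustrative sketch, not part of the original file: with "set debug
   infrun 1", the format strings above produce output of roughly this
   shape (pid/thread values below are made up; the status line comes
   from target_waitstatus_to_string, so its exact wording is an
   assumption):

     infrun: target_wait (-1, status) =
     infrun: 4711 [Thread 4711.4712],
     infrun: status->kind = stopped, signal = SIGTRAP
*/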
2220
2221 /* Wait for control to return from inferior to debugger.
2222
2223 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2224 as if they were SIGTRAP signals. This can be useful during
2225 the startup sequence on some targets such as HP/UX, where
2226 we receive an EXEC event instead of the expected SIGTRAP.
2227
2228 If inferior gets a signal, we may decide to start it up again
2229 instead of returning. That is why there is a loop in this function.
2230 When this function actually returns it means the inferior
2231 should be left stopped and GDB should read more commands. */
2232
2233 void
2234 wait_for_inferior (int treat_exec_as_sigtrap)
2235 {
2236 struct cleanup *old_cleanups;
2237 struct execution_control_state ecss;
2238 struct execution_control_state *ecs;
2239
2240 if (debug_infrun)
2241 fprintf_unfiltered
2242 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2243 treat_exec_as_sigtrap);
2244
2245 old_cleanups =
2246 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2247
2248 ecs = &ecss;
2249 memset (ecs, 0, sizeof (*ecs));
2250
2251 /* We'll update this if & when we switch to a new thread. */
2252 previous_inferior_ptid = inferior_ptid;
2253
2254 while (1)
2255 {
2256 struct cleanup *old_chain;
2257
2258 /* We have to invalidate the registers BEFORE calling target_wait
2259 because they can be loaded from the target while in target_wait.
2260 This makes remote debugging a bit more efficient for those
2261 targets that provide critical registers as part of their normal
2262 status mechanism. */
2263
2264 overlay_cache_invalid = 1;
2265 registers_changed ();
2266
2267 if (deprecated_target_wait_hook)
2268 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2269 else
2270 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2271
2272 if (debug_infrun)
2273 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2274
2275 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2276 {
2277 xfree (ecs->ws.value.execd_pathname);
2278 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2279 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2280 }
2281
2282 /* If an error happens while handling the event, propagate GDB's
2283 knowledge of the executing state to the frontend/user running
2284 state. */
2285 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2286
2287 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2288 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2289 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2290
2291 /* Now figure out what to do with the result. */
2292 handle_inferior_event (ecs);
2293
2294 /* No error, don't finish the state yet. */
2295 discard_cleanups (old_chain);
2296
2297 if (!ecs->wait_some_more)
2298 break;
2299 }
2300
2301 do_cleanups (old_cleanups);
2302 }
2303
2304 /* Asynchronous version of wait_for_inferior. It is called by the
2305 event loop whenever a change of state is detected on the file
2306 descriptor corresponding to the target. It can be called more than
2307 once to complete a single execution command. In such cases we need
2308 to keep the state in a global variable ECSS. If it is the last time
2309 that this function is called for a single execution command, then
2310 report to the user that the inferior has stopped, and do the
2311 necessary cleanups. */
2312
2313 void
2314 fetch_inferior_event (void *client_data)
2315 {
2316 struct execution_control_state ecss;
2317 struct execution_control_state *ecs = &ecss;
2318 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2319 struct cleanup *ts_old_chain;
2320 int was_sync = sync_execution;
2321
2322 memset (ecs, 0, sizeof (*ecs));
2323
2324 /* We'll update this if & when we switch to a new thread. */
2325 previous_inferior_ptid = inferior_ptid;
2326
2327 if (non_stop)
2328 /* In non-stop mode, the user/frontend should not notice a thread
2329 switch due to internal events. Make sure we revert to the
2330 user-selected thread and frame after handling the event and
2331 running any breakpoint commands. */
2332 make_cleanup_restore_current_thread ();
2333
2334 /* We have to invalidate the registers BEFORE calling target_wait
2335 because they can be loaded from the target while in target_wait.
2336 This makes remote debugging a bit more efficient for those
2337 targets that provide critical registers as part of their normal
2338 status mechanism. */
2339
2340 overlay_cache_invalid = 1;
2341 registers_changed ();
2342
2343 if (deprecated_target_wait_hook)
2344 ecs->ptid =
2345 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2346 else
2347 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2348
2349 if (debug_infrun)
2350 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2351
2352 if (non_stop
2353 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2354 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2355 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2356 /* In non-stop mode, each thread is handled individually. Switch
2357 early, so the global state is set correctly for this
2358 thread. */
2359 context_switch (ecs->ptid);
2360
2361 /* If an error happens while handling the event, propagate GDB's
2362 knowledge of the executing state to the frontend/user running
2363 state. */
2364 if (!non_stop)
2365 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2366 else
2367 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2368
2369 /* Now figure out what to do with the result. */
2370 handle_inferior_event (ecs);
2371
2372 if (!ecs->wait_some_more)
2373 {
2374 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2375
2376 delete_step_thread_step_resume_breakpoint ();
2377
2378 /* We may not find an inferior if this was a process exit. */
2379 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2380 normal_stop ();
2381
2382 if (target_has_execution
2383 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2384 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2385 && ecs->event_thread->step_multi
2386 && ecs->event_thread->stop_step)
2387 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2388 else
2389 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2390 }
2391
2392 /* No error, don't finish the thread states yet. */
2393 discard_cleanups (ts_old_chain);
2394
2395 /* Revert thread and frame. */
2396 do_cleanups (old_chain);
2397
2398 /* If the inferior was in sync execution mode, and now isn't,
2399 restore the prompt. */
2400 if (was_sync && !sync_execution)
2401 display_gdb_prompt (0);
2402 }
2403
2404 /* Record the frame and location we're currently stepping through. */
2405 void
2406 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2407 {
2408 struct thread_info *tp = inferior_thread ();
2409
2410 tp->step_frame_id = get_frame_id (frame);
2411 tp->step_stack_frame_id = get_stack_frame_id (frame);
2412
2413 tp->current_symtab = sal.symtab;
2414 tp->current_line = sal.line;
2415 }
2416
2417 /* Clear context switchable stepping state. */
2418
2419 void
2420 init_thread_stepping_state (struct thread_info *tss)
2421 {
2422 tss->stepping_over_breakpoint = 0;
2423 tss->step_after_step_resume_breakpoint = 0;
2424 tss->stepping_through_solib_after_catch = 0;
2425 tss->stepping_through_solib_catchpoints = NULL;
2426 }
2427
2428 /* Return the cached copy of the last pid/waitstatus returned by
2429 target_wait()/deprecated_target_wait_hook(). The data is actually
2430 cached by handle_inferior_event(), which gets called immediately
2431 after target_wait()/deprecated_target_wait_hook(). */
2432
2433 void
2434 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2435 {
2436 *ptidp = target_last_wait_ptid;
2437 *status = target_last_waitstatus;
2438 }
2439
2440 void
2441 nullify_last_target_wait_ptid (void)
2442 {
2443 target_last_wait_ptid = minus_one_ptid;
2444 }
2445
2446 /* Switch thread contexts. */
2447
2448 static void
2449 context_switch (ptid_t ptid)
2450 {
2451 if (debug_infrun)
2452 {
2453 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2454 target_pid_to_str (inferior_ptid));
2455 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2456 target_pid_to_str (ptid));
2457 }
2458
2459 switch_to_thread (ptid);
2460 }
2461
2462 static void
2463 adjust_pc_after_break (struct execution_control_state *ecs)
2464 {
2465 struct regcache *regcache;
2466 struct gdbarch *gdbarch;
2467 struct address_space *aspace;
2468 CORE_ADDR breakpoint_pc;
2469
2470 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2471 we aren't, just return.
2472
2473 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2474 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2475 implemented by software breakpoints should be handled through the normal
2476 breakpoint layer.
2477
2478 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2479 different signals (SIGILL or SIGEMT for instance), but it is less
2480 clear where the PC is pointing afterwards. It may not match
2481 gdbarch_decr_pc_after_break. I don't know any specific target that
2482 generates these signals at breakpoints (the code has been in GDB since at
2483 least 1992) so I can not guess how to handle them here.
2484
2485 In earlier versions of GDB, a target with
2486 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2487 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2488 target with both of these set in GDB history, and it seems unlikely to be
2489 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2490
2491 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2492 return;
2493
2494 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2495 return;
2496
2497 /* In reverse execution, when a breakpoint is hit, the instruction
2498 under it has already been de-executed. The reported PC always
2499 points at the breakpoint address, so adjusting it further would
2500 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2501 architecture:
2502
2503 B1 0x08000000 : INSN1
2504 B2 0x08000001 : INSN2
2505 0x08000002 : INSN3
2506 PC -> 0x08000003 : INSN4
2507
2508 Say you're stopped at 0x08000003 as above. Reverse continuing
2509 from that point should hit B2 as below. Reading the PC when the
2510 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2511 been de-executed already.
2512
2513 B1 0x08000000 : INSN1
2514 B2 PC -> 0x08000001 : INSN2
2515 0x08000002 : INSN3
2516 0x08000003 : INSN4
2517
2518 We can't apply the same logic as for forward execution, because
2519 we would wrongly adjust the PC to 0x08000000, since there's a
2520 breakpoint at PC - 1. We'd then report a hit on B1, although
2521 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2522 behaviour. */
2523 if (execution_direction == EXEC_REVERSE)
2524 return;
2525
2526 /* If this target does not decrement the PC after breakpoints, then
2527 we have nothing to do. */
2528 regcache = get_thread_regcache (ecs->ptid);
2529 gdbarch = get_regcache_arch (regcache);
2530 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2531 return;
2532
2533 aspace = get_regcache_aspace (regcache);
2534
2535 /* Find the location where (if we've hit a breakpoint) the
2536 breakpoint would be. */
2537 breakpoint_pc = regcache_read_pc (regcache)
2538 - gdbarch_decr_pc_after_break (gdbarch);
2539
2540 /* Check whether there actually is a software breakpoint inserted at
2541 that location.
2542
2543 If in non-stop mode, a race condition is possible where we've
2544 removed a breakpoint, but stop events for that breakpoint were
2545 already queued and arrive later. To suppress those spurious
2546 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2547 and retire them after a number of stop events are reported. */
2548 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2549 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2550 {
2551 struct cleanup *old_cleanups = NULL;
2552 if (RECORD_IS_USED)
2553 old_cleanups = record_gdb_operation_disable_set ();
2554
2555 /* When using hardware single-step, a SIGTRAP is reported for both
2556 a completed single-step and a software breakpoint. Need to
2557 differentiate between the two, as the latter needs adjusting
2558 but the former does not.
2559
2560 The SIGTRAP can be due to a completed hardware single-step only if
2561 - we didn't insert software single-step breakpoints
2562 - the thread to be examined is still the current thread
2563 - this thread is currently being stepped
2564
2565 If any of these events did not occur, we must have stopped due
2566 to hitting a software breakpoint, and have to back up to the
2567 breakpoint address.
2568
2569 As a special case, we could have hardware single-stepped a
2570 software breakpoint. In this case (prev_pc == breakpoint_pc),
2571 we also need to back up to the breakpoint address. */
2572
2573 if (singlestep_breakpoints_inserted_p
2574 || !ptid_equal (ecs->ptid, inferior_ptid)
2575 || !currently_stepping (ecs->event_thread)
2576 || ecs->event_thread->prev_pc == breakpoint_pc)
2577 regcache_write_pc (regcache, breakpoint_pc);
2578
2579 if (RECORD_IS_USED)
2580 do_cleanups (old_cleanups);
2581 }
2582 }
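/* Illustrative sketch, not part of the original file: on an
   architecture whose gdbarch_decr_pc_after_break is 1 (i386 is the
   classic case, since the one-byte breakpoint instruction leaves the
   reported PC just past the breakpoint address), the adjustment above
   works out as follows (addresses made up):

     PC reported with the SIGTRAP          0x08048056
     breakpoint_pc = 0x08048056 - 1      = 0x08048055
     software breakpoint inserted there?   yes
       -> regcache_write_pc (regcache, 0x08048055)

   so later code such as bpstat_stop_status sees the PC sitting exactly
   on the breakpoint address instead of one byte past it.  */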
2583
2584 void
2585 init_infwait_state (void)
2586 {
2587 waiton_ptid = pid_to_ptid (-1);
2588 infwait_state = infwait_normal_state;
2589 }
2590
2591 void
2592 error_is_running (void)
2593 {
2594 error (_("\
2595 Cannot execute this command while the selected thread is running."));
2596 }
2597
2598 void
2599 ensure_not_running (void)
2600 {
2601 if (is_running (inferior_ptid))
2602 error_is_running ();
2603 }
2604
2605 static int
2606 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2607 {
2608 for (frame = get_prev_frame (frame);
2609 frame != NULL;
2610 frame = get_prev_frame (frame))
2611 {
2612 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2613 return 1;
2614 if (get_frame_type (frame) != INLINE_FRAME)
2615 break;
2616 }
2617
2618 return 0;
2619 }
2620
2621 /* Auxiliary function that handles syscall entry/return events.
2622 It returns 1 if the inferior should keep going (and GDB
2623 should ignore the event), or 0 if the event deserves to be
2624 processed. */
2625
2626 static int
2627 handle_syscall_event (struct execution_control_state *ecs)
2628 {
2629 struct regcache *regcache;
2630 struct gdbarch *gdbarch;
2631 int syscall_number;
2632
2633 if (!ptid_equal (ecs->ptid, inferior_ptid))
2634 context_switch (ecs->ptid);
2635
2636 regcache = get_thread_regcache (ecs->ptid);
2637 gdbarch = get_regcache_arch (regcache);
2638 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2639 stop_pc = regcache_read_pc (regcache);
2640
2641 target_last_waitstatus.value.syscall_number = syscall_number;
2642
2643 if (catch_syscall_enabled () > 0
2644 && catching_syscall_number (syscall_number) > 0)
2645 {
2646 if (debug_infrun)
2647 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2648 syscall_number);
2649
2650 ecs->event_thread->stop_bpstat
2651 = bpstat_stop_status (get_regcache_aspace (regcache),
2652 stop_pc, ecs->ptid);
2653 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2654
2655 if (!ecs->random_signal)
2656 {
2657 /* Catchpoint hit. */
2658 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2659 return 0;
2660 }
2661 }
2662
2663 /* If no catchpoint triggered for this, then keep going. */
2664 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2665 keep_going (ecs);
2666 return 1;
2667 }
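/* Illustrative sketch, not part of the original file: the
   catch_syscall_enabled / catching_syscall_number checks above are
   what makes a user-level "catch syscall" catchpoint fire, e.g.
   (output wording approximate):

     (gdb) catch syscall close
     Catchpoint 1 (syscall 'close')
     (gdb) continue
     Catchpoint 1 (call to syscall 'close'), ...

   Syscall entry/return events that nobody is catching are swallowed
   here and the inferior is simply kept going.  */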
2668
2669 /* Given an execution control state that has been freshly filled in
2670 by an event from the inferior, figure out what it means and take
2671 appropriate action. */
2672
2673 static void
2674 handle_inferior_event (struct execution_control_state *ecs)
2675 {
2676 struct frame_info *frame;
2677 struct gdbarch *gdbarch;
2678 int sw_single_step_trap_p = 0;
2679 int stopped_by_watchpoint;
2680 int stepped_after_stopped_by_watchpoint = 0;
2681 struct symtab_and_line stop_pc_sal;
2682 enum stop_kind stop_soon;
2683
2684 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2685 {
2686 /* We had an event in the inferior, but we are not interested in
2687 handling it at this level. The lower layers have already
2688 done what needs to be done, if anything.
2689
2690 One of the possible circumstances for this is when the
2691 inferior produces output for the console. The inferior has
2692 not stopped, and we are ignoring the event. Another possible
2693 circumstance is any event which the lower level knows will be
2694 reported multiple times without an intervening resume. */
2695 if (debug_infrun)
2696 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2697 prepare_to_wait (ecs);
2698 return;
2699 }
2700
2701 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2702 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2703 {
2704 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2705 gdb_assert (inf);
2706 stop_soon = inf->stop_soon;
2707 }
2708 else
2709 stop_soon = NO_STOP_QUIETLY;
2710
2711 /* Cache the last pid/waitstatus. */
2712 target_last_wait_ptid = ecs->ptid;
2713 target_last_waitstatus = ecs->ws;
2714
2715 /* Always clear state belonging to the previous time we stopped. */
2716 stop_stack_dummy = 0;
2717
2718 /* If it's a new process, add it to the thread database. */
2719
2720 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2721 && !ptid_equal (ecs->ptid, minus_one_ptid)
2722 && !in_thread_list (ecs->ptid));
2723
2724 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2725 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2726 add_thread (ecs->ptid);
2727
2728 ecs->event_thread = find_thread_ptid (ecs->ptid);
2729
2730 /* Dependent on valid ECS->EVENT_THREAD. */
2731 adjust_pc_after_break (ecs);
2732
2733 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2734 reinit_frame_cache ();
2735
2736 breakpoint_retire_moribund ();
2737
2738 /* First, distinguish signals caused by the debugger from signals
2739 that have to do with the program's own actions. Note that
2740 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2741 on the operating system version. Here we detect when a SIGILL or
2742 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2743 something similar for SIGSEGV, since a SIGSEGV will be generated
2744 when we're trying to execute a breakpoint instruction on a
2745 non-executable stack. This happens for call dummy breakpoints
2746 for architectures like SPARC that place call dummies on the
2747 stack. */
2748 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2749 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2750 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2751 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2752 {
2753 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2754
2755 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2756 regcache_read_pc (regcache)))
2757 {
2758 if (debug_infrun)
2759 fprintf_unfiltered (gdb_stdlog,
2760 "infrun: Treating signal as SIGTRAP\n");
2761 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2762 }
2763 }
2764
2765 /* Mark the non-executing threads accordingly. In all-stop, all
2766 threads of all processes are stopped when we get any event
2767 reported. In non-stop mode, only the event thread stops. If
2768 we're handling a process exit in non-stop mode, there's nothing
2769 to do, as threads of the dead process are gone, and threads of
2770 any other process were left running. */
2771 if (!non_stop)
2772 set_executing (minus_one_ptid, 0);
2773 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2774 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2775 set_executing (inferior_ptid, 0);
2776
2777 switch (infwait_state)
2778 {
2779 case infwait_thread_hop_state:
2780 if (debug_infrun)
2781 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2782 break;
2783
2784 case infwait_normal_state:
2785 if (debug_infrun)
2786 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2787 break;
2788
2789 case infwait_step_watch_state:
2790 if (debug_infrun)
2791 fprintf_unfiltered (gdb_stdlog,
2792 "infrun: infwait_step_watch_state\n");
2793
2794 stepped_after_stopped_by_watchpoint = 1;
2795 break;
2796
2797 case infwait_nonstep_watch_state:
2798 if (debug_infrun)
2799 fprintf_unfiltered (gdb_stdlog,
2800 "infrun: infwait_nonstep_watch_state\n");
2801 insert_breakpoints ();
2802
2803 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2804 handle things like signals arriving and other things happening
2805 in combination correctly? */
2806 stepped_after_stopped_by_watchpoint = 1;
2807 break;
2808
2809 default:
2810 internal_error (__FILE__, __LINE__, _("bad switch"));
2811 }
2812
2813 infwait_state = infwait_normal_state;
2814 waiton_ptid = pid_to_ptid (-1);
2815
2816 switch (ecs->ws.kind)
2817 {
2818 case TARGET_WAITKIND_LOADED:
2819 if (debug_infrun)
2820 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2821 /* Ignore gracefully during startup of the inferior, as it might
2822 be the shell which has just loaded some objects; otherwise,
2823 add the symbols for the newly loaded objects. Also ignore at
2824 the beginning of an attach or remote session; we will query
2825 the full list of libraries once the connection is
2826 established. */
2827 if (stop_soon == NO_STOP_QUIETLY)
2828 {
2829 /* Check for any newly added shared libraries if we're
2830 supposed to be adding them automatically. Switch
2831 terminal for any messages produced by
2832 breakpoint_re_set. */
2833 target_terminal_ours_for_output ();
2834 /* NOTE: cagney/2003-11-25: Make certain that the target
2835 stack's section table is kept up-to-date. Architectures,
2836 (e.g., PPC64), use the section table to perform
2837 operations such as address => section name and hence
2838 require the table to contain all sections (including
2839 those found in shared libraries). */
2840 #ifdef SOLIB_ADD
2841 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2842 #else
2843 solib_add (NULL, 0, &current_target, auto_solib_add);
2844 #endif
2845 target_terminal_inferior ();
2846
2847 /* If requested, stop when the dynamic linker notifies
2848 gdb of events. This allows the user to get control
2849 and place breakpoints in initializer routines for
2850 dynamically loaded objects (among other things). */
2851 if (stop_on_solib_events)
2852 {
2853 /* Make sure we print "Stopped due to solib-event" in
2854 normal_stop. */
2855 stop_print_frame = 1;
2856
2857 stop_stepping (ecs);
2858 return;
2859 }
2860
2861 /* NOTE drow/2007-05-11: This might be a good place to check
2862 for "catch load". */
2863 }
2864
2865 /* If we are skipping through a shell, or through shared library
2866 loading that we aren't interested in, resume the program. If
2867 we're running the program normally, also resume. But stop if
2868 we're attaching or setting up a remote connection. */
2869 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2870 {
2871 /* Loading of shared libraries might have changed breakpoint
2872 addresses. Make sure new breakpoints are inserted. */
2873 if (stop_soon == NO_STOP_QUIETLY
2874 && !breakpoints_always_inserted_mode ())
2875 insert_breakpoints ();
2876 resume (0, TARGET_SIGNAL_0);
2877 prepare_to_wait (ecs);
2878 return;
2879 }
2880
2881 break;
2882
2883 case TARGET_WAITKIND_SPURIOUS:
2884 if (debug_infrun)
2885 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2886 resume (0, TARGET_SIGNAL_0);
2887 prepare_to_wait (ecs);
2888 return;
2889
2890 case TARGET_WAITKIND_EXITED:
2891 if (debug_infrun)
2892 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2893 inferior_ptid = ecs->ptid;
2894 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2895 set_current_program_space (current_inferior ()->pspace);
2896 handle_vfork_child_exec_or_exit (0);
2897 target_terminal_ours (); /* Must do this before mourn anyway */
2898 print_stop_reason (EXITED, ecs->ws.value.integer);
2899
2900 /* Record the exit code in the convenience variable $_exitcode, so
2901 that the user can inspect this again later. */
2902 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2903 (LONGEST) ecs->ws.value.integer);
2904 gdb_flush (gdb_stdout);
2905 target_mourn_inferior ();
2906 singlestep_breakpoints_inserted_p = 0;
2907 stop_print_frame = 0;
2908 stop_stepping (ecs);
2909 return;
2910
2911 case TARGET_WAITKIND_SIGNALLED:
2912 if (debug_infrun)
2913 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2914 inferior_ptid = ecs->ptid;
2915 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
2916 set_current_program_space (current_inferior ()->pspace);
2917 handle_vfork_child_exec_or_exit (0);
2918 stop_print_frame = 0;
2919 target_terminal_ours (); /* Must do this before mourn anyway */
2920
2921 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2922 reach here unless the inferior is dead. However, for years
2923 target_kill() was called here, which hints that fatal signals aren't
2924 really fatal on some systems. If that's true, then some changes
2925 may be needed. */
2926 target_mourn_inferior ();
2927
2928 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2929 singlestep_breakpoints_inserted_p = 0;
2930 stop_stepping (ecs);
2931 return;
2932
2933 /* The following are the only cases in which we keep going;
2934 the above cases end in a continue or goto. */
2935 case TARGET_WAITKIND_FORKED:
2936 case TARGET_WAITKIND_VFORKED:
2937 if (debug_infrun)
2938 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2939
2940 if (!ptid_equal (ecs->ptid, inferior_ptid))
2941 {
2942 context_switch (ecs->ptid);
2943 reinit_frame_cache ();
2944 }
2945
2946 /* Immediately detach breakpoints from the child before there's
2947 any chance of letting the user delete breakpoints from the
2948 breakpoint lists. If we don't do this early, it's easy to
2949 leave leftover traps in the child, viz: "break foo; catch
2950 fork; c; <fork>; del; c; <child calls foo>". We only follow
2951 the fork on the last `continue', and by that time the
2952 breakpoint at "foo" is long gone from the breakpoint table.
2953 If we vforked, then we don't need to unpatch here, since both
2954 parent and child are sharing the same memory pages; we'll
2955 need to unpatch at follow/detach time instead to be certain
2956 that new breakpoints added between catchpoint hit time and
2957 vfork follow are detached. */
2958 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2959 {
2960 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2961
2962 /* This won't actually modify the breakpoint list, but will
2963 physically remove the breakpoints from the child. */
2964 detach_breakpoints (child_pid);
2965 }
2966
2967 /* In case the event is caught by a catchpoint, remember that
2968 the event is to be followed at the next resume of the thread,
2969 and not immediately. */
2970 ecs->event_thread->pending_follow = ecs->ws;
2971
2972 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2973
2974 ecs->event_thread->stop_bpstat
2975 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
2976 stop_pc, ecs->ptid);
2977
2978 /* Note that we're interested in knowing whether the bpstat actually
2979 causes a stop, not just whether it may explain the signal.
2980 Software watchpoints, for example, always appear in the
2981 bpstat. */
2982 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
2983
2984 /* If no catchpoint triggered for this, then keep going. */
2985 if (ecs->random_signal)
2986 {
2987 ptid_t parent;
2988 ptid_t child;
2989 int should_resume;
2990 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
2991
2992 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2993
2994 should_resume = follow_fork ();
2995
2996 parent = ecs->ptid;
2997 child = ecs->ws.value.related_pid;
2998
2999 /* In non-stop mode, also resume the other branch. */
3000 if (non_stop && !detach_fork)
3001 {
3002 if (follow_child)
3003 switch_to_thread (parent);
3004 else
3005 switch_to_thread (child);
3006
3007 ecs->event_thread = inferior_thread ();
3008 ecs->ptid = inferior_ptid;
3009 keep_going (ecs);
3010 }
3011
3012 if (follow_child)
3013 switch_to_thread (child);
3014 else
3015 switch_to_thread (parent);
3016
3017 ecs->event_thread = inferior_thread ();
3018 ecs->ptid = inferior_ptid;
3019
3020 if (should_resume)
3021 keep_going (ecs);
3022 else
3023 stop_stepping (ecs);
3024 return;
3025 }
3026 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3027 goto process_event_stop_test;
3028
3029 case TARGET_WAITKIND_VFORK_DONE:
3030 /* Done with the shared memory region. Re-insert breakpoints in
3031 the parent, and keep going. */
3032
3033 if (debug_infrun)
3034 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3035
3036 if (!ptid_equal (ecs->ptid, inferior_ptid))
3037 context_switch (ecs->ptid);
3038
3039 current_inferior ()->waiting_for_vfork_done = 0;
3040 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3041 /* This also takes care of reinserting breakpoints in the
3042 previously locked inferior. */
3043 keep_going (ecs);
3044 return;
3045
3046 case TARGET_WAITKIND_EXECD:
3047 if (debug_infrun)
3048 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3049
3050 if (!ptid_equal (ecs->ptid, inferior_ptid))
3051 {
3052 context_switch (ecs->ptid);
3053 reinit_frame_cache ();
3054 }
3055
3056 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3057
3058 /* Do whatever is necessary to the parent branch of the vfork. */
3059 handle_vfork_child_exec_or_exit (1);
3060
3061 /* This causes the eventpoints and symbol table to be reset.
3062 Must do this now, before trying to determine whether to
3063 stop. */
3064 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3065
3066 ecs->event_thread->stop_bpstat
3067 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3068 stop_pc, ecs->ptid);
3069 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3070
3071 /* Note that this may be referenced from inside
3072 bpstat_stop_status above, through inferior_has_execd. */
3073 xfree (ecs->ws.value.execd_pathname);
3074 ecs->ws.value.execd_pathname = NULL;
3075
3076 /* If no catchpoint triggered for this, then keep going. */
3077 if (ecs->random_signal)
3078 {
3079 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3080 keep_going (ecs);
3081 return;
3082 }
3083 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3084 goto process_event_stop_test;
3085
3086 /* Be careful not to try to gather much state about a thread
3087 that's in a syscall. It's frequently a losing proposition. */
3088 case TARGET_WAITKIND_SYSCALL_ENTRY:
3089 if (debug_infrun)
3090 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3091 /* Get the current syscall number. */
3092 if (handle_syscall_event (ecs) != 0)
3093 return;
3094 goto process_event_stop_test;
3095
3096 /* Before examining the threads further, step this thread to
3097 get it entirely out of the syscall. (We get notice of the
3098 event when the thread is just on the verge of exiting a
3099 syscall. Stepping one instruction seems to get it back
3100 into user code.) */
3101 case TARGET_WAITKIND_SYSCALL_RETURN:
3102 if (debug_infrun)
3103 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3104 if (handle_syscall_event (ecs) != 0)
3105 return;
3106 goto process_event_stop_test;
3107
3108 case TARGET_WAITKIND_STOPPED:
3109 if (debug_infrun)
3110 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3111 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3112 break;
3113
3114 case TARGET_WAITKIND_NO_HISTORY:
3115 /* Reverse execution: target ran out of history info. */
3116 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3117 print_stop_reason (NO_HISTORY, 0);
3118 stop_stepping (ecs);
3119 return;
3120 }
3121
3122 if (ecs->new_thread_event)
3123 {
3124 if (non_stop)
3125 /* Non-stop assumes that the target handles adding new threads
3126 to the thread list. */
3127 internal_error (__FILE__, __LINE__, "\
3128 targets should add new threads to the thread list themselves in non-stop mode.");
3129
3130 /* We may want to consider not doing a resume here in order to
3131 give the user a chance to play with the new thread. It might
3132 be good to make that a user-settable option. */
3133
3134 /* At this point, all threads are stopped (happens automatically
3135 in either the OS or the native code). Therefore we need to
3136 continue all threads in order to make progress. */
3137
3138 if (!ptid_equal (ecs->ptid, inferior_ptid))
3139 context_switch (ecs->ptid);
3140 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3141 prepare_to_wait (ecs);
3142 return;
3143 }
3144
3145 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3146 {
3147 /* Do we need to clean up the state of a thread that has
3148 completed a displaced single-step? (Doing so usually affects
3149 the PC, so do it here, before we set stop_pc.) */
3150 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3151
3152 /* If we either finished a single-step or hit a breakpoint, but
3153 the user wanted this thread to be stopped, pretend we got a
3154 SIG0 (generic unsignaled stop). */
3155
3156 if (ecs->event_thread->stop_requested
3157 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3158 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3159 }
3160
3161 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3162
3163 if (debug_infrun)
3164 {
3165 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3166 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3167 struct cleanup *old_chain = save_inferior_ptid ();
3168
3169 inferior_ptid = ecs->ptid;
3170
3171 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3172 paddress (gdbarch, stop_pc));
3173 if (target_stopped_by_watchpoint ())
3174 {
3175 CORE_ADDR addr;
3176 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3177
3178 if (target_stopped_data_address (&current_target, &addr))
3179 fprintf_unfiltered (gdb_stdlog,
3180 "infrun: stopped data address = %s\n",
3181 paddress (gdbarch, addr));
3182 else
3183 fprintf_unfiltered (gdb_stdlog,
3184 "infrun: (no data address available)\n");
3185 }
3186
3187 do_cleanups (old_chain);
3188 }
3189
3190 if (stepping_past_singlestep_breakpoint)
3191 {
3192 gdb_assert (singlestep_breakpoints_inserted_p);
3193 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3194 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3195
3196 stepping_past_singlestep_breakpoint = 0;
3197
3198 /* We've either finished single-stepping past the single-step
3199 breakpoint, or stopped for some other reason. It would be nice if
3200 we could tell, but we can't reliably. */
3201 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3202 {
3203 if (debug_infrun)
3204 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3205 /* Pull the single step breakpoints out of the target. */
3206 remove_single_step_breakpoints ();
3207 singlestep_breakpoints_inserted_p = 0;
3208
3209 ecs->random_signal = 0;
3210 ecs->event_thread->trap_expected = 0;
3211
3212 context_switch (saved_singlestep_ptid);
3213 if (deprecated_context_hook)
3214 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3215
3216 resume (1, TARGET_SIGNAL_0);
3217 prepare_to_wait (ecs);
3218 return;
3219 }
3220 }
3221
3222 if (!ptid_equal (deferred_step_ptid, null_ptid))
3223 {
3224 /* In non-stop mode, there's never a deferred_step_ptid set. */
3225 gdb_assert (!non_stop);
3226
3227 /* If we stopped for some other reason than single-stepping, ignore
3228 the fact that we were supposed to switch back. */
3229 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3230 {
3231 if (debug_infrun)
3232 fprintf_unfiltered (gdb_stdlog,
3233 "infrun: handling deferred step\n");
3234
3235 /* Pull the single step breakpoints out of the target. */
3236 if (singlestep_breakpoints_inserted_p)
3237 {
3238 remove_single_step_breakpoints ();
3239 singlestep_breakpoints_inserted_p = 0;
3240 }
3241
3242 /* Note: We do not call context_switch at this point, as the
3243 context is already set up for stepping the original thread. */
3244 switch_to_thread (deferred_step_ptid);
3245 deferred_step_ptid = null_ptid;
3246 /* Suppress spurious "Switching to ..." message. */
3247 previous_inferior_ptid = inferior_ptid;
3248
3249 resume (1, TARGET_SIGNAL_0);
3250 prepare_to_wait (ecs);
3251 return;
3252 }
3253
3254 deferred_step_ptid = null_ptid;
3255 }
3256
3257 /* See if a thread hit a thread-specific breakpoint that was meant for
3258 another thread. If so, then step that thread past the breakpoint,
3259 and continue it. */
3260
3261 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3262 {
3263 int thread_hop_needed = 0;
3264 struct address_space *aspace =
3265 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3266
3267 /* Check if a regular breakpoint has been hit before checking
3268 for a potential single step breakpoint. Otherwise, GDB will
3269 not see this breakpoint hit when stepping onto breakpoints. */
3270 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3271 {
3272 ecs->random_signal = 0;
3273 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3274 thread_hop_needed = 1;
3275 }
3276 else if (singlestep_breakpoints_inserted_p)
3277 {
3278 /* We have not context switched yet, so this should be true
3279 no matter which thread hit the singlestep breakpoint. */
3280 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3281 if (debug_infrun)
3282 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3283 "trap for %s\n",
3284 target_pid_to_str (ecs->ptid));
3285
3286 ecs->random_signal = 0;
3287 /* The call to in_thread_list is necessary because PTIDs sometimes
3288 change when we go from single-threaded to multi-threaded. If
3289 the singlestep_ptid is still in the list, assume that it is
3290 really different from ecs->ptid. */
3291 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3292 && in_thread_list (singlestep_ptid))
3293 {
3294 /* If the PC of the thread we were trying to single-step
3295 has changed, discard this event (which we were going
3296 to ignore anyway), and pretend we saw that thread
3297 trap. This prevents us from continuously moving the
3298 single-step breakpoint forward, one instruction at a
3299 time. If the PC has changed, then the thread we were
3300 trying to single-step has trapped or been signalled,
3301 but the event has not been reported to GDB yet.
3302
3303 There might be some cases where this loses signal
3304 information, if a signal has arrived at exactly the
3305 same time that the PC changed, but this is the best
3306 we can do with the information available. Perhaps we
3307 should arrange to report all events for all threads
3308 when they stop, or to re-poll the remote looking for
3309 this particular thread (i.e. temporarily enable
3310 schedlock). */
3311
3312 CORE_ADDR new_singlestep_pc
3313 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3314
3315 if (new_singlestep_pc != singlestep_pc)
3316 {
3317 enum target_signal stop_signal;
3318
3319 if (debug_infrun)
3320 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3321 " but expected thread advanced also\n");
3322
3323 /* The current context still belongs to
3324 singlestep_ptid. Don't swap here, since that's
3325 the context we want to use. Just fudge our
3326 state and continue. */
3327 stop_signal = ecs->event_thread->stop_signal;
3328 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3329 ecs->ptid = singlestep_ptid;
3330 ecs->event_thread = find_thread_ptid (ecs->ptid);
3331 ecs->event_thread->stop_signal = stop_signal;
3332 stop_pc = new_singlestep_pc;
3333 }
3334 else
3335 {
3336 if (debug_infrun)
3337 fprintf_unfiltered (gdb_stdlog,
3338 "infrun: unexpected thread\n");
3339
3340 thread_hop_needed = 1;
3341 stepping_past_singlestep_breakpoint = 1;
3342 saved_singlestep_ptid = singlestep_ptid;
3343 }
3344 }
3345 }
3346
3347 if (thread_hop_needed)
3348 {
3349 struct regcache *thread_regcache;
3350 int remove_status = 0;
3351
3352 if (debug_infrun)
3353 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3354
3355 /* Switch context before touching inferior memory; the
3356 previous thread may have exited. */
3357 if (!ptid_equal (inferior_ptid, ecs->ptid))
3358 context_switch (ecs->ptid);
3359
3360 /* Saw a breakpoint, but it was hit by the wrong thread.
3361 Just continue. */
3362
3363 if (singlestep_breakpoints_inserted_p)
3364 {
3365 /* Pull the single step breakpoints out of the target. */
3366 remove_single_step_breakpoints ();
3367 singlestep_breakpoints_inserted_p = 0;
3368 }
3369
3370 /* If the arch can displace step, don't remove the
3371 breakpoints. */
3372 thread_regcache = get_thread_regcache (ecs->ptid);
3373 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3374 remove_status = remove_breakpoints ();
3375
3376 /* Did we fail to remove breakpoints? If so, try
3377 to set the PC past the bp. (There's at least
3378 one situation in which we can fail to remove
3379 the bp's: On HP-UX's that use ttrace, we can't
3380 change the address space of a vforking child
3381 process until the child exits (well, okay, not
3382 then either :-) or execs. */
3383 if (remove_status != 0)
3384 error (_("Cannot step over breakpoint hit in wrong thread"));
3385 else
3386 { /* Single step */
3387 if (!non_stop)
3388 {
3389 /* Only need to require the next event from this
3390 thread in all-stop mode. */
3391 waiton_ptid = ecs->ptid;
3392 infwait_state = infwait_thread_hop_state;
3393 }
3394
3395 ecs->event_thread->stepping_over_breakpoint = 1;
3396 keep_going (ecs);
3397 return;
3398 }
3399 }
3400 else if (singlestep_breakpoints_inserted_p)
3401 {
3402 sw_single_step_trap_p = 1;
3403 ecs->random_signal = 0;
3404 }
3405 }
3406 else
3407 ecs->random_signal = 1;
3408
3409 /* See if something interesting happened to the non-current thread. If
3410 so, then switch to that thread. */
3411 if (!ptid_equal (ecs->ptid, inferior_ptid))
3412 {
3413 if (debug_infrun)
3414 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3415
3416 context_switch (ecs->ptid);
3417
3418 if (deprecated_context_hook)
3419 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3420 }
3421
3422 /* At this point, get hold of the now-current thread's frame. */
3423 frame = get_current_frame ();
3424 gdbarch = get_frame_arch (frame);
3425
3426 if (singlestep_breakpoints_inserted_p)
3427 {
3428 /* Pull the single step breakpoints out of the target. */
3429 remove_single_step_breakpoints ();
3430 singlestep_breakpoints_inserted_p = 0;
3431 }
3432
3433 if (stepped_after_stopped_by_watchpoint)
3434 stopped_by_watchpoint = 0;
3435 else
3436 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3437
3438 /* If necessary, step over this watchpoint. We'll be back to display
3439 it in a moment. */
3440 if (stopped_by_watchpoint
3441 && (target_have_steppable_watchpoint
3442 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3443 {
3444 /* At this point, we are stopped at an instruction which has
3445 attempted to write to a piece of memory under control of
3446 a watchpoint. The instruction hasn't actually executed
3447 yet. If we were to evaluate the watchpoint expression
3448 now, we would get the old value, and therefore no change
3449 would seem to have occurred.
3450
3451 In order to make watchpoints work `right', we really need
3452 to complete the memory write, and then evaluate the
3453 watchpoint expression. We do this by single-stepping the
3454 target.
3455
3456 It may not be necessary to disable the watchpoint to step over
3457 it. For example, the PA can (with some kernel cooperation)
3458 single step over a watchpoint without disabling the watchpoint.
3459
3460 It is far more common to need to disable a watchpoint to step
3461 the inferior over it. If we have non-steppable watchpoints,
3462 we must disable the current watchpoint; it's simplest to
3463 disable all watchpoints and breakpoints. */
3464 int hw_step = 1;
3465
3466 if (!target_have_steppable_watchpoint)
3467 remove_breakpoints ();
3468 /* Single step */
3469 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3470 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3471 waiton_ptid = ecs->ptid;
3472 if (target_have_steppable_watchpoint)
3473 infwait_state = infwait_step_watch_state;
3474 else
3475 infwait_state = infwait_nonstep_watch_state;
3476 prepare_to_wait (ecs);
3477 return;
3478 }
3479
3480 ecs->stop_func_start = 0;
3481 ecs->stop_func_end = 0;
3482 ecs->stop_func_name = 0;
3483 /* Don't care about return value; stop_func_start and stop_func_name
3484 will both be 0 if it doesn't work. */
3485 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3486 &ecs->stop_func_start, &ecs->stop_func_end);
3487 ecs->stop_func_start
3488 += gdbarch_deprecated_function_start_offset (gdbarch);
3489 ecs->event_thread->stepping_over_breakpoint = 0;
3490 bpstat_clear (&ecs->event_thread->stop_bpstat);
3491 ecs->event_thread->stop_step = 0;
3492 stop_print_frame = 1;
3493 ecs->random_signal = 0;
3494 stopped_by_random_signal = 0;
3495
3496 /* Hide inlined functions starting here, unless we just performed stepi or
3497 nexti. After stepi and nexti, always show the innermost frame (not any
3498 inline function call sites). */
3499 if (ecs->event_thread->step_range_end != 1)
3500 skip_inline_frames (ecs->ptid);
3501
3502 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3503 && ecs->event_thread->trap_expected
3504 && gdbarch_single_step_through_delay_p (gdbarch)
3505 && currently_stepping (ecs->event_thread))
3506 {
3507 /* We're trying to step off a breakpoint. Turns out that we're
3508 also on an instruction that needs to be stepped multiple
3509 times before it has been fully executed. E.g., architectures
3510 with a delay slot. It needs to be stepped twice, once for
3511 the instruction and once for the delay slot. */
3512 int step_through_delay
3513 = gdbarch_single_step_through_delay (gdbarch, frame);
3514 if (debug_infrun && step_through_delay)
3515 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3516 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3517 {
3518 /* The user issued a continue when stopped at a breakpoint.
3519 Set up for another trap and get out of here. */
3520 ecs->event_thread->stepping_over_breakpoint = 1;
3521 keep_going (ecs);
3522 return;
3523 }
3524 else if (step_through_delay)
3525 {
3526 /* The user issued a step when stopped at a breakpoint.
3527 Maybe we should stop, maybe we should not - the delay
3528 slot *might* correspond to a line of source. In any
3529 case, don't decide that here, just set
3530 ecs->stepping_over_breakpoint, making sure we
3531 single-step again before breakpoints are re-inserted. */
3532 ecs->event_thread->stepping_over_breakpoint = 1;
3533 }
3534 }
3535
3536 /* Look at the cause of the stop, and decide what to do.
3537 The alternatives are:
3538 1) stop_stepping and return; to really stop and return to the debugger,
3539 2) keep_going and return to start up again
3540 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3541 3) set ecs->random_signal to 1, and the decision between 1 and 2
3542 will be made according to the signal handling tables. */
3543
3544 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3545 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3546 || stop_soon == STOP_QUIETLY_REMOTE)
3547 {
3548 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3549 {
3550 if (debug_infrun)
3551 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3552 stop_print_frame = 0;
3553 stop_stepping (ecs);
3554 return;
3555 }
3556
3557 /* This originates from start_remote(), start_inferior() and
3558 shared library hook functions. */
3559 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3560 {
3561 if (debug_infrun)
3562 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3563 stop_stepping (ecs);
3564 return;
3565 }
3566
3567 /* This originates from attach_command(). We need to overwrite
3568 the stop_signal here, because some kernels don't ignore a
3569 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3570 See more comments in inferior.h. On the other hand, if we
3571 get a non-SIGSTOP, report it to the user - assume the backend
3572 will handle the SIGSTOP if it should show up later.
3573
3574 Also consider that the attach is complete when we see a
3575 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3576 target extended-remote (e.g. gdbserver), report it instead of
3577 a SIGSTOP. We already rely on SIGTRAP being our
3578 signal, so this is no exception.
3579
3580 Also consider that the attach is complete when we see a
3581 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3582 the target to stop all threads of the inferior, in case the
3583 low level attach operation doesn't stop them implicitly. If
3584 they weren't stopped implicitly, then the stub will report a
3585 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3586 other than GDB's request. */
3587 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3588 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3589 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3590 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3591 {
3592 stop_stepping (ecs);
3593 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3594 return;
3595 }
3596
3597 /* See if there is a breakpoint at the current PC. */
3598 ecs->event_thread->stop_bpstat
3599 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3600 stop_pc, ecs->ptid);
3601
3602 /* The following is in case a breakpoint condition called a
3603 function. */
3604 stop_print_frame = 1;
3605
3606 /* This is where we handle "moribund" watchpoints. Unlike
3607 software breakpoint traps, hardware watchpoint traps are
3608 always distinguishable from random traps. If no high-level
3609 watchpoint is associated with the reported stop data address
3610 anymore, then the bpstat does not explain the signal ---
3611 simply make sure to ignore it if `stopped_by_watchpoint' is
3612 set. */
3613
3614 if (debug_infrun
3615 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3616 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3617 && stopped_by_watchpoint)
3618 fprintf_unfiltered (gdb_stdlog, "\
3619 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3620
3621 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3622 at one stage in the past included checks for an inferior
3623 function call's call dummy's return breakpoint. The original
3624 comment, that went with the test, read:
3625
3626 ``End of a stack dummy. Some systems (e.g. Sony news) give
3627 another signal besides SIGTRAP, so check here as well as
3628 above.''
3629
3630 If someone ever tries to get call dummies on a
3631 non-executable stack to work (where the target would stop
3632 with something like a SIGSEGV), then those tests might need
3633 to be re-instated. Given, however, that the tests were only
3634 enabled when momentary breakpoints were not being used, I
3635 suspect that it won't be the case.
3636
3637 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3638 be necessary for call dummies on a non-executable stack on
3639 SPARC. */
3640
3641 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3642 ecs->random_signal
3643 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3644 || stopped_by_watchpoint
3645 || ecs->event_thread->trap_expected
3646 || (ecs->event_thread->step_range_end
3647 && ecs->event_thread->step_resume_breakpoint == NULL));
3648 else
3649 {
3650 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3651 if (!ecs->random_signal)
3652 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3653 }
3654 }
3655
3656 /* When we reach this point, we've pretty much decided
3657 that the reason for stopping must've been a random
3658 (unexpected) signal. */
3659
3660 else
3661 ecs->random_signal = 1;
3662
3663 process_event_stop_test:
3664
3665 /* Re-fetch current thread's frame in case we did a
3666 "goto process_event_stop_test" above. */
3667 frame = get_current_frame ();
3668 gdbarch = get_frame_arch (frame);
3669
3670 /* For the program's own signals, act according to
3671 the signal handling tables. */
3672
3673 if (ecs->random_signal)
3674 {
3675 /* Signal not for debugging purposes. */
3676 int printed = 0;
3677
3678 if (debug_infrun)
3679 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3680 ecs->event_thread->stop_signal);
3681
3682 stopped_by_random_signal = 1;
3683
3684 if (signal_print[ecs->event_thread->stop_signal])
3685 {
3686 printed = 1;
3687 target_terminal_ours_for_output ();
3688 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3689 }
3690 /* Always stop on signals if we're either just gaining control
3691 of the program, or the user explicitly requested this thread
3692 to remain stopped. */
3693 if (stop_soon != NO_STOP_QUIETLY
3694 || ecs->event_thread->stop_requested
3695 || signal_stop_state (ecs->event_thread->stop_signal))
3696 {
3697 stop_stepping (ecs);
3698 return;
3699 }
3700 /* If not going to stop, give terminal back
3701 if we took it away. */
3702 else if (printed)
3703 target_terminal_inferior ();
3704
3705 /* Clear the signal if it should not be passed. */
3706 if (signal_program[ecs->event_thread->stop_signal] == 0)
3707 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3708
3709 if (ecs->event_thread->prev_pc == stop_pc
3710 && ecs->event_thread->trap_expected
3711 && ecs->event_thread->step_resume_breakpoint == NULL)
3712 {
3713 /* We were just starting a new sequence, attempting to
3714 single-step off of a breakpoint and expecting a SIGTRAP.
3715 Instead this signal arrives. This signal will take us out
3716 of the stepping range so GDB needs to remember to, when
3717 the signal handler returns, resume stepping off that
3718 breakpoint. */
3719 /* To simplify things, "continue" is forced to use the same
3720 code paths as single-step - set a breakpoint at the
3721 signal return address and then, once hit, step off that
3722 breakpoint. */
3723 if (debug_infrun)
3724 fprintf_unfiltered (gdb_stdlog,
3725 "infrun: signal arrived while stepping over "
3726 "breakpoint\n");
3727
3728 insert_step_resume_breakpoint_at_frame (frame);
3729 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3730 keep_going (ecs);
3731 return;
3732 }
3733
3734 if (ecs->event_thread->step_range_end != 0
3735 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3736 && (ecs->event_thread->step_range_start <= stop_pc
3737 && stop_pc < ecs->event_thread->step_range_end)
3738 && frame_id_eq (get_stack_frame_id (frame),
3739 ecs->event_thread->step_stack_frame_id)
3740 && ecs->event_thread->step_resume_breakpoint == NULL)
3741 {
3742 /* The inferior is about to take a signal that will take it
3743 out of the single step range. Set a breakpoint at the
3744 current PC (which is presumably where the signal handler
3745 will eventually return) and then allow the inferior to
3746 run free.
3747
3748 Note that this is only needed for a signal delivered
3749 while in the single-step range. Nested signals aren't a
3750 problem as they eventually all return. */
3751 if (debug_infrun)
3752 fprintf_unfiltered (gdb_stdlog,
3753 "infrun: signal may take us out of "
3754 "single-step range\n");
3755
3756 insert_step_resume_breakpoint_at_frame (frame);
3757 keep_going (ecs);
3758 return;
3759 }
3760
3761 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3762 when either there's a nested signal, or when there's a
3763 pending signal enabled just as the signal handler returns
3764 (leaving the inferior at the step-resume-breakpoint without
3765 actually executing it). Either way continue until the
3766 breakpoint is really hit. */
3767 keep_going (ecs);
3768 return;
3769 }
3770
3771 /* Handle cases caused by hitting a breakpoint. */
3772 {
3773 CORE_ADDR jmp_buf_pc;
3774 struct bpstat_what what;
3775
3776 what = bpstat_what (ecs->event_thread->stop_bpstat);
3777
3778 if (what.call_dummy)
3779 {
3780 stop_stack_dummy = 1;
3781 }
3782
3783 switch (what.main_action)
3784 {
3785 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3786 /* If we hit the breakpoint at longjmp while stepping, we
3787 install a momentary breakpoint at the target of the
3788 jmp_buf. */
3789
3790 if (debug_infrun)
3791 fprintf_unfiltered (gdb_stdlog,
3792 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3793
3794 ecs->event_thread->stepping_over_breakpoint = 1;
3795
3796 if (!gdbarch_get_longjmp_target_p (gdbarch)
3797 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3798 {
3799 if (debug_infrun)
3800 fprintf_unfiltered (gdb_stdlog, "\
3801 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3802 keep_going (ecs);
3803 return;
3804 }
3805
3806 /* We're going to replace the current step-resume breakpoint
3807 with a longjmp-resume breakpoint. */
3808 delete_step_resume_breakpoint (ecs->event_thread);
3809
3810 /* Insert a breakpoint at resume address. */
3811 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3812
3813 keep_going (ecs);
3814 return;
3815
3816 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3817 if (debug_infrun)
3818 fprintf_unfiltered (gdb_stdlog,
3819 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3820
3821 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3822 delete_step_resume_breakpoint (ecs->event_thread);
3823
3824 ecs->event_thread->stop_step = 1;
3825 print_stop_reason (END_STEPPING_RANGE, 0);
3826 stop_stepping (ecs);
3827 return;
3828
3829 case BPSTAT_WHAT_SINGLE:
3830 if (debug_infrun)
3831 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3832 ecs->event_thread->stepping_over_breakpoint = 1;
3833 /* Still need to check other stuff, at least the case
3834 where we are stepping and step out of the right range. */
3835 break;
3836
3837 case BPSTAT_WHAT_STOP_NOISY:
3838 if (debug_infrun)
3839 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3840 stop_print_frame = 1;
3841
3842 /* We are about to nuke the step_resume_breakpoint via the
3843 cleanup chain, so no need to worry about it here. */
3844
3845 stop_stepping (ecs);
3846 return;
3847
3848 case BPSTAT_WHAT_STOP_SILENT:
3849 if (debug_infrun)
3850 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3851 stop_print_frame = 0;
3852
3853 /* We are about to nuke the step_resume_breakpoint via the
3854 cleanup chain, so no need to worry about it here. */
3855
3856 stop_stepping (ecs);
3857 return;
3858
3859 case BPSTAT_WHAT_STEP_RESUME:
3860 if (debug_infrun)
3861 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3862
3863 delete_step_resume_breakpoint (ecs->event_thread);
3864 if (ecs->event_thread->step_after_step_resume_breakpoint)
3865 {
3866 /* Back when the step-resume breakpoint was inserted, we
3867 were trying to single-step off a breakpoint. Go back
3868 to doing that. */
3869 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3870 ecs->event_thread->stepping_over_breakpoint = 1;
3871 keep_going (ecs);
3872 return;
3873 }
3874 if (stop_pc == ecs->stop_func_start
3875 && execution_direction == EXEC_REVERSE)
3876 {
3877 /* We are stepping over a function call in reverse, and
3878 just hit the step-resume breakpoint at the start
3879 address of the function. Go back to single-stepping,
3880 which should take us back to the function call. */
3881 ecs->event_thread->stepping_over_breakpoint = 1;
3882 keep_going (ecs);
3883 return;
3884 }
3885 break;
3886
3887 case BPSTAT_WHAT_CHECK_SHLIBS:
3888 {
3889 if (debug_infrun)
3890 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3891
3892 /* Check for any newly added shared libraries if we're
3893 supposed to be adding them automatically. Switch
3894 terminal for any messages produced by
3895 breakpoint_re_set. */
3896 target_terminal_ours_for_output ();
3897 /* NOTE: cagney/2003-11-25: Make certain that the target
3898 stack's section table is kept up-to-date. Architectures
3899 (e.g., PPC64) use the section table to perform
3900 operations such as address => section name and hence
3901 require the table to contain all sections (including
3902 those found in shared libraries). */
3903 #ifdef SOLIB_ADD
3904 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3905 #else
3906 solib_add (NULL, 0, &current_target, auto_solib_add);
3907 #endif
3908 target_terminal_inferior ();
3909
3910 /* If requested, stop when the dynamic linker notifies
3911 gdb of events. This allows the user to get control
3912 and place breakpoints in initializer routines for
3913 dynamically loaded objects (among other things). */
3914 if (stop_on_solib_events || stop_stack_dummy)
3915 {
3916 stop_stepping (ecs);
3917 return;
3918 }
3919 else
3920 {
3921 /* We want to step over this breakpoint, then keep going. */
3922 ecs->event_thread->stepping_over_breakpoint = 1;
3923 break;
3924 }
3925 }
3926 break;
3927
3928 case BPSTAT_WHAT_CHECK_JIT:
3929 if (debug_infrun)
3930 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
3931
3932 /* Switch terminal for any messages produced by breakpoint_re_set. */
3933 target_terminal_ours_for_output ();
3934
3935 jit_event_handler (gdbarch);
3936
3937 target_terminal_inferior ();
3938
3939 /* We want to step over this breakpoint, then keep going. */
3940 ecs->event_thread->stepping_over_breakpoint = 1;
3941
3942 break;
3943
3944 case BPSTAT_WHAT_LAST:
3945 /* Not a real code, but listed here to shut up gcc -Wall. */
3946
3947 case BPSTAT_WHAT_KEEP_CHECKING:
3948 break;
3949 }
3950 }
3951
3952 /* We come here if we hit a breakpoint but should not
3953 stop for it. Possibly we also were stepping
3954 and should stop for that. So fall through and
3955 test for stepping. But, if not stepping,
3956 do not stop. */
3957
3958 /* In all-stop mode, if we're currently stepping but have stopped in
3959 some other thread, we need to switch back to the stepped thread. */
3960 if (!non_stop)
3961 {
3962 struct thread_info *tp;
3963 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3964 ecs->event_thread);
3965 if (tp)
3966 {
3967 /* However, if the current thread is blocked on some internal
3968 breakpoint, and we simply need to step over that breakpoint
3969 to get it going again, do that first. */
3970 if ((ecs->event_thread->trap_expected
3971 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3972 || ecs->event_thread->stepping_over_breakpoint)
3973 {
3974 keep_going (ecs);
3975 return;
3976 }
3977
3978 /* If the stepping thread exited, then don't try to switch
3979 back and resume it, which could fail in several different
3980 ways depending on the target. Instead, just keep going.
3981
3982 We can find a stepping dead thread in the thread list in
3983 two cases:
3984
3985 - The target supports thread exit events, and when the
3986 target tries to delete the thread from the thread list,
3987 inferior_ptid pointed at the exiting thread. In such
3988 case, calling delete_thread does not really remove the
3989 thread from the list; instead, the thread is left listed,
3990 with 'exited' state.
3991
3992 - The target's debug interface does not support thread
3993 exit events, and so we have no idea whatsoever if the
3994 previously stepping thread is still alive. For that
3995 reason, we need to synchronously query the target
3996 now. */
3997 if (is_exited (tp->ptid)
3998 || !target_thread_alive (tp->ptid))
3999 {
4000 if (debug_infrun)
4001 fprintf_unfiltered (gdb_stdlog, "\
4002 infrun: not switching back to stepped thread, it has vanished\n");
4003
4004 delete_thread (tp->ptid);
4005 keep_going (ecs);
4006 return;
4007 }
4008
4009 /* Otherwise, we no longer expect a trap in the current thread.
4010 Clear the trap_expected flag before switching back -- this is
4011 what keep_going would do as well, if we called it. */
4012 ecs->event_thread->trap_expected = 0;
4013
4014 if (debug_infrun)
4015 fprintf_unfiltered (gdb_stdlog,
4016 "infrun: switching back to stepped thread\n");
4017
4018 ecs->event_thread = tp;
4019 ecs->ptid = tp->ptid;
4020 context_switch (ecs->ptid);
4021 keep_going (ecs);
4022 return;
4023 }
4024 }
4025
4026 /* Are we stepping to get the inferior out of the dynamic linker's
4027 hook (and possibly the dld itself) after catching a shlib
4028 event? */
4029 if (ecs->event_thread->stepping_through_solib_after_catch)
4030 {
4031 #if defined(SOLIB_ADD)
4032 /* Have we reached our destination? If not, keep going. */
4033 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4034 {
4035 if (debug_infrun)
4036 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4037 ecs->event_thread->stepping_over_breakpoint = 1;
4038 keep_going (ecs);
4039 return;
4040 }
4041 #endif
4042 if (debug_infrun)
4043 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4044 /* Else, stop and report the catchpoint(s) whose triggering
4045 caused us to begin stepping. */
4046 ecs->event_thread->stepping_through_solib_after_catch = 0;
4047 bpstat_clear (&ecs->event_thread->stop_bpstat);
4048 ecs->event_thread->stop_bpstat
4049 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4050 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4051 stop_print_frame = 1;
4052 stop_stepping (ecs);
4053 return;
4054 }
4055
4056 if (ecs->event_thread->step_resume_breakpoint)
4057 {
4058 if (debug_infrun)
4059 fprintf_unfiltered (gdb_stdlog,
4060 "infrun: step-resume breakpoint is inserted\n");
4061
4062 /* Having a step-resume breakpoint overrides anything
4063 else having to do with stepping commands until
4064 that breakpoint is reached. */
4065 keep_going (ecs);
4066 return;
4067 }
4068
4069 if (ecs->event_thread->step_range_end == 0)
4070 {
4071 if (debug_infrun)
4072 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4073 /* Likewise if we aren't even stepping. */
4074 keep_going (ecs);
4075 return;
4076 }
4077
4078 /* Re-fetch current thread's frame in case the code above caused
4079 the frame cache to be re-initialized, making our FRAME variable
4080 a dangling pointer. */
4081 frame = get_current_frame ();
4082
4083 /* If stepping through a line, keep going if still within it.
4084
4085 Note that step_range_end is the address of the first instruction
4086 beyond the step range, and NOT the address of the last instruction
4087 within it!
4088
4089 Note also that during reverse execution, we may be stepping
4090 through a function epilogue and therefore must detect when
4091 the current-frame changes in the middle of a line. */
4092
4093 if (stop_pc >= ecs->event_thread->step_range_start
4094 && stop_pc < ecs->event_thread->step_range_end
4095 && (execution_direction != EXEC_REVERSE
4096 || frame_id_eq (get_frame_id (frame),
4097 ecs->event_thread->step_frame_id)))
4098 {
4099 if (debug_infrun)
4100 fprintf_unfiltered
4101 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4102 paddress (gdbarch, ecs->event_thread->step_range_start),
4103 paddress (gdbarch, ecs->event_thread->step_range_end));
4104
4105 /* When stepping backward, stop at beginning of line range
4106 (unless it's the function entry point, in which case
4107 keep going back to the call point). */
4108 if (stop_pc == ecs->event_thread->step_range_start
4109 && stop_pc != ecs->stop_func_start
4110 && execution_direction == EXEC_REVERSE)
4111 {
4112 ecs->event_thread->stop_step = 1;
4113 print_stop_reason (END_STEPPING_RANGE, 0);
4114 stop_stepping (ecs);
4115 }
4116 else
4117 keep_going (ecs);
4118
4119 return;
4120 }
4121
4122 /* We stepped out of the stepping range. */
4123
4124 /* If we are stepping at the source level and entered the runtime
4125 loader dynamic symbol resolution code...
4126
4127 EXEC_FORWARD: we keep on single stepping until we exit the run
4128 time loader code and reach the callee's address.
4129
4130 EXEC_REVERSE: we've already executed the callee (backward), and
4131 the runtime loader code is handled just like any other
4132 undebuggable function call. Now we need only keep stepping
4133 backward through the trampoline code, and that's handled further
4134 down, so there is nothing for us to do here. */
4135
4136 if (execution_direction != EXEC_REVERSE
4137 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4138 && in_solib_dynsym_resolve_code (stop_pc))
4139 {
4140 CORE_ADDR pc_after_resolver =
4141 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4142
4143 if (debug_infrun)
4144 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4145
4146 if (pc_after_resolver)
4147 {
4148 /* Set up a step-resume breakpoint at the address
4149 indicated by SKIP_SOLIB_RESOLVER. */
4150 struct symtab_and_line sr_sal;
4151 init_sal (&sr_sal);
4152 sr_sal.pc = pc_after_resolver;
4153 sr_sal.pspace = get_frame_program_space (frame);
4154
4155 insert_step_resume_breakpoint_at_sal (gdbarch,
4156 sr_sal, null_frame_id);
4157 }
4158
4159 keep_going (ecs);
4160 return;
4161 }
4162
4163 if (ecs->event_thread->step_range_end != 1
4164 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4165 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4166 && get_frame_type (frame) == SIGTRAMP_FRAME)
4167 {
4168 if (debug_infrun)
4169 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4170 /* The inferior, while doing a "step" or "next", has ended up in
4171 a signal trampoline (either by a signal being delivered or by
4172 the signal handler returning). Just single-step until the
4173 inferior leaves the trampoline (either by calling the handler
4174 or returning). */
4175 keep_going (ecs);
4176 return;
4177 }
4178
4179 /* Check for subroutine calls. The check for the current frame
4180 equalling the step ID is not necessary - the check of the
4181 previous frame's ID is sufficient - but it is a common case and
4182 cheaper than checking the previous frame's ID.
4183
4184 NOTE: frame_id_eq will never report two invalid frame IDs as
4185 being equal, so to get into this block, both the current and
4186 previous frame must have valid frame IDs. */
4187 /* The outer_frame_id check is a heuristic to detect stepping
4188 through startup code. If we step over an instruction which
4189 sets the stack pointer from an invalid value to a valid value,
4190 we may detect that as a subroutine call from the mythical
4191 "outermost" function. This could be fixed by marking
4192 outermost frames as !stack_p,code_p,special_p. Then the
4193 initial outermost frame, before sp was valid, would
4194 have code_addr == &_start. See the comment in frame_id_eq
4195 for more. */
4196 if (!frame_id_eq (get_stack_frame_id (frame),
4197 ecs->event_thread->step_stack_frame_id)
4198 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4199 ecs->event_thread->step_stack_frame_id)
4200 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4201 outer_frame_id)
4202 || step_start_function != find_pc_function (stop_pc))))
4203 {
4204 CORE_ADDR real_stop_pc;
4205
4206 if (debug_infrun)
4207 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4208
4209 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4210 || ((ecs->event_thread->step_range_end == 1)
4211 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4212 ecs->stop_func_start)))
4213 {
4214 /* I presume that step_over_calls is only 0 when we're
4215 supposed to be stepping at the assembly language level
4216 ("stepi"). Just stop. */
4217 /* Also, maybe we just did a "nexti" inside a prolog, so we
4218 thought it was a subroutine call but it was not. Stop as
4219 well. FENN */
4220 /* And this works the same backward as frontward. MVS */
4221 ecs->event_thread->stop_step = 1;
4222 print_stop_reason (END_STEPPING_RANGE, 0);
4223 stop_stepping (ecs);
4224 return;
4225 }
4226
4227 /* Reverse stepping through solib trampolines. */
4228
4229 if (execution_direction == EXEC_REVERSE
4230 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4231 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4232 || (ecs->stop_func_start == 0
4233 && in_solib_dynsym_resolve_code (stop_pc))))
4234 {
4235 /* Any solib trampoline code can be handled in reverse
4236 by simply continuing to single-step. We have already
4237 executed the solib function (backwards), and a few
4238 steps will take us back through the trampoline to the
4239 caller. */
4240 keep_going (ecs);
4241 return;
4242 }
4243
4244 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4245 {
4246 /* We're doing a "next".
4247
4248 Normal (forward) execution: set a breakpoint at the
4249 callee's return address (the address at which the caller
4250 will resume).
4251
4252 Reverse (backward) execution. set the step-resume
4253 breakpoint at the start of the function that we just
4254 stepped into (backwards), and continue to there. When we
4255 get there, we'll need to single-step back to the caller. */
4256
4257 if (execution_direction == EXEC_REVERSE)
4258 {
4259 struct symtab_and_line sr_sal;
4260
4261 /* Normal function call return (static or dynamic). */
4262 init_sal (&sr_sal);
4263 sr_sal.pc = ecs->stop_func_start;
4264 sr_sal.pspace = get_frame_program_space (frame);
4265 insert_step_resume_breakpoint_at_sal (gdbarch,
4266 sr_sal, null_frame_id);
4267 }
4268 else
4269 insert_step_resume_breakpoint_at_caller (frame);
4270
4271 keep_going (ecs);
4272 return;
4273 }
4274
4275 /* If we are in a function call trampoline (a stub between the
4276 calling routine and the real function), locate the real
4277 function. That's what tells us (a) whether we want to step
4278 into it at all, and (b) what prologue we want to run to the
4279 end of, if we do step into it. */
4280 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4281 if (real_stop_pc == 0)
4282 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4283 if (real_stop_pc != 0)
4284 ecs->stop_func_start = real_stop_pc;
4285
4286 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4287 {
4288 struct symtab_and_line sr_sal;
4289 init_sal (&sr_sal);
4290 sr_sal.pc = ecs->stop_func_start;
4291 sr_sal.pspace = get_frame_program_space (frame);
4292
4293 insert_step_resume_breakpoint_at_sal (gdbarch,
4294 sr_sal, null_frame_id);
4295 keep_going (ecs);
4296 return;
4297 }
4298
4299 /* If we have line number information for the function we are
4300 thinking of stepping into, step into it.
4301
4302 If there are several symtabs at that PC (e.g. with include
4303 files), just want to know whether *any* of them have line
4304 numbers. find_pc_line handles this. */
4305 {
4306 struct symtab_and_line tmp_sal;
4307
4308 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4309 tmp_sal.pspace = get_frame_program_space (frame);
4310 if (tmp_sal.line != 0)
4311 {
4312 if (execution_direction == EXEC_REVERSE)
4313 handle_step_into_function_backward (gdbarch, ecs);
4314 else
4315 handle_step_into_function (gdbarch, ecs);
4316 return;
4317 }
4318 }
4319
4320 /* If we have no line number and the step-stop-if-no-debug is
4321 set, we stop the step so that the user has a chance to switch
4322 in assembly mode. */
4323 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4324 && step_stop_if_no_debug)
4325 {
4326 ecs->event_thread->stop_step = 1;
4327 print_stop_reason (END_STEPPING_RANGE, 0);
4328 stop_stepping (ecs);
4329 return;
4330 }
4331
4332 if (execution_direction == EXEC_REVERSE)
4333 {
4334 /* Set a breakpoint at callee's start address.
4335 From there we can step once and be back in the caller. */
4336 struct symtab_and_line sr_sal;
4337 init_sal (&sr_sal);
4338 sr_sal.pc = ecs->stop_func_start;
4339 sr_sal.pspace = get_frame_program_space (frame);
4340 insert_step_resume_breakpoint_at_sal (gdbarch,
4341 sr_sal, null_frame_id);
4342 }
4343 else
4344 /* Set a breakpoint at callee's return address (the address
4345 at which the caller will resume). */
4346 insert_step_resume_breakpoint_at_caller (frame);
4347
4348 keep_going (ecs);
4349 return;
4350 }
4351
4352 /* Reverse stepping through solib trampolines. */
4353
4354 if (execution_direction == EXEC_REVERSE
4355 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4356 {
4357 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4358 || (ecs->stop_func_start == 0
4359 && in_solib_dynsym_resolve_code (stop_pc)))
4360 {
4361 /* Any solib trampoline code can be handled in reverse
4362 by simply continuing to single-step. We have already
4363 executed the solib function (backwards), and a few
4364 steps will take us back through the trampoline to the
4365 caller. */
4366 keep_going (ecs);
4367 return;
4368 }
4369 else if (in_solib_dynsym_resolve_code (stop_pc))
4370 {
4371 /* Stepped backward into the solib dynsym resolver.
4372 Set a breakpoint at its start and continue, then
4373 one more step will take us out. */
4374 struct symtab_and_line sr_sal;
4375 init_sal (&sr_sal);
4376 sr_sal.pc = ecs->stop_func_start;
4377 sr_sal.pspace = get_frame_program_space (frame);
4378 insert_step_resume_breakpoint_at_sal (gdbarch,
4379 sr_sal, null_frame_id);
4380 keep_going (ecs);
4381 return;
4382 }
4383 }
4384
4385 /* If we're in the return path from a shared library trampoline,
4386 we want to proceed through the trampoline when stepping. */
4387 if (gdbarch_in_solib_return_trampoline (gdbarch,
4388 stop_pc, ecs->stop_func_name))
4389 {
4390 /* Determine where this trampoline returns. */
4391 CORE_ADDR real_stop_pc;
4392 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4393
4394 if (debug_infrun)
4395 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4396
4397 /* Only proceed through if we know where it's going. */
4398 if (real_stop_pc)
4399 {
4400 /* And put the step-breakpoint there and go until there. */
4401 struct symtab_and_line sr_sal;
4402
4403 init_sal (&sr_sal); /* initialize to zeroes */
4404 sr_sal.pc = real_stop_pc;
4405 sr_sal.section = find_pc_overlay (sr_sal.pc);
4406 sr_sal.pspace = get_frame_program_space (frame);
4407
4408 /* Do not specify what the fp should be when we stop since
4409 on some machines the prologue is where the new fp value
4410 is established. */
4411 insert_step_resume_breakpoint_at_sal (gdbarch,
4412 sr_sal, null_frame_id);
4413
4414 /* Restart without fiddling with the step ranges or
4415 other state. */
4416 keep_going (ecs);
4417 return;
4418 }
4419 }
4420
4421 stop_pc_sal = find_pc_line (stop_pc, 0);
4422
4423 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4424 the trampoline processing logic; however, there are some trampolines
4425 that have no names, so we should do trampoline handling first. */
4426 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4427 && ecs->stop_func_name == NULL
4428 && stop_pc_sal.line == 0)
4429 {
4430 if (debug_infrun)
4431 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4432
4433 /* The inferior just stepped into, or returned to, an
4434 undebuggable function (where there is no debugging information
4435 and no line number corresponding to the address where the
4436 inferior stopped). Since we want to skip this kind of code,
4437 we keep going until the inferior returns from this
4438 function - unless the user has asked us not to (via
4439 set step-mode) or we no longer know how to get back
4440 to the call site. */
4441 if (step_stop_if_no_debug
4442 || !frame_id_p (frame_unwind_caller_id (frame)))
4443 {
4444 /* If we have no line number and the step-stop-if-no-debug
4445 is set, we stop the step so that the user has a chance to
4446 switch in assembly mode. */
4447 ecs->event_thread->stop_step = 1;
4448 print_stop_reason (END_STEPPING_RANGE, 0);
4449 stop_stepping (ecs);
4450 return;
4451 }
4452 else
4453 {
4454 /* Set a breakpoint at callee's return address (the address
4455 at which the caller will resume). */
4456 insert_step_resume_breakpoint_at_caller (frame);
4457 keep_going (ecs);
4458 return;
4459 }
4460 }
4461
4462 if (ecs->event_thread->step_range_end == 1)
4463 {
4464 /* It is stepi or nexti. We always want to stop stepping after
4465 one instruction. */
4466 if (debug_infrun)
4467 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4468 ecs->event_thread->stop_step = 1;
4469 print_stop_reason (END_STEPPING_RANGE, 0);
4470 stop_stepping (ecs);
4471 return;
4472 }
4473
4474 if (stop_pc_sal.line == 0)
4475 {
4476 /* We have no line number information. That means to stop
4477 stepping (does this always happen right after one instruction,
4478 when we do "s" in a function with no line numbers,
4479 or can this happen as a result of a return or longjmp?). */
4480 if (debug_infrun)
4481 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4482 ecs->event_thread->stop_step = 1;
4483 print_stop_reason (END_STEPPING_RANGE, 0);
4484 stop_stepping (ecs);
4485 return;
4486 }
4487
4488 /* Look for "calls" to inlined functions, part one. If the inline
4489 frame machinery detected some skipped call sites, we have entered
4490 a new inline function. */
4491
4492 if (frame_id_eq (get_frame_id (get_current_frame ()),
4493 ecs->event_thread->step_frame_id)
4494 && inline_skipped_frames (ecs->ptid))
4495 {
4496 struct symtab_and_line call_sal;
4497
4498 if (debug_infrun)
4499 fprintf_unfiltered (gdb_stdlog,
4500 "infrun: stepped into inlined function\n");
4501
4502 find_frame_sal (get_current_frame (), &call_sal);
4503
4504 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4505 {
4506 /* For "step", we're going to stop. But if the call site
4507 for this inlined function is on the same source line as
4508 we were previously stepping, go down into the function
4509 first. Otherwise stop at the call site. */
4510
4511 if (call_sal.line == ecs->event_thread->current_line
4512 && call_sal.symtab == ecs->event_thread->current_symtab)
4513 step_into_inline_frame (ecs->ptid);
4514
4515 ecs->event_thread->stop_step = 1;
4516 print_stop_reason (END_STEPPING_RANGE, 0);
4517 stop_stepping (ecs);
4518 return;
4519 }
4520 else
4521 {
4522 /* For "next", we should stop at the call site if it is on a
4523 different source line. Otherwise continue through the
4524 inlined function. */
4525 if (call_sal.line == ecs->event_thread->current_line
4526 && call_sal.symtab == ecs->event_thread->current_symtab)
4527 keep_going (ecs);
4528 else
4529 {
4530 ecs->event_thread->stop_step = 1;
4531 print_stop_reason (END_STEPPING_RANGE, 0);
4532 stop_stepping (ecs);
4533 }
4534 return;
4535 }
4536 }
4537
4538 /* Look for "calls" to inlined functions, part two. If we are still
4539 in the same real function we were stepping through, but we have
4540 to go further up to find the exact frame ID, we are stepping
4541 through a more inlined call beyond its call site. */
4542
4543 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4544 && !frame_id_eq (get_frame_id (get_current_frame ()),
4545 ecs->event_thread->step_frame_id)
4546 && stepped_in_from (get_current_frame (),
4547 ecs->event_thread->step_frame_id))
4548 {
4549 if (debug_infrun)
4550 fprintf_unfiltered (gdb_stdlog,
4551 "infrun: stepping through inlined function\n");
4552
4553 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4554 keep_going (ecs);
4555 else
4556 {
4557 ecs->event_thread->stop_step = 1;
4558 print_stop_reason (END_STEPPING_RANGE, 0);
4559 stop_stepping (ecs);
4560 }
4561 return;
4562 }
4563
4564 if ((stop_pc == stop_pc_sal.pc)
4565 && (ecs->event_thread->current_line != stop_pc_sal.line
4566 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4567 {
4568 /* We are at the start of a different line. So stop. Note that
4569 we don't stop if we step into the middle of a different line.
4570 That is said to make things like for (;;) statements work
4571 better. */
4572 if (debug_infrun)
4573 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4574 ecs->event_thread->stop_step = 1;
4575 print_stop_reason (END_STEPPING_RANGE, 0);
4576 stop_stepping (ecs);
4577 return;
4578 }
4579
4580 /* We aren't done stepping.
4581
4582 Optimize by setting the stepping range to the line.
4583 (We might not be in the original line, but if we entered a
4584 new line in mid-statement, we continue stepping. This makes
4585 things like for(;;) statements work better.) */
4586
4587 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4588 ecs->event_thread->step_range_end = stop_pc_sal.end;
4589 set_step_info (frame, stop_pc_sal);
4590
4591 if (debug_infrun)
4592 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4593 keep_going (ecs);
4594 }
4595
4596 /* Is thread TP in the middle of single-stepping? */
4597
4598 static int
4599 currently_stepping (struct thread_info *tp)
4600 {
4601 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4602 || tp->trap_expected
4603 || tp->stepping_through_solib_after_catch
4604 || bpstat_should_step ());
4605 }
4606
4607 /* Returns true if any thread *but* the one passed in "data" is in the
4608 middle of stepping or of handling a "next". */
4609
4610 static int
4611 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4612 {
4613 if (tp == data)
4614 return 0;
4615
4616 return (tp->step_range_end
4617 || tp->trap_expected
4618 || tp->stepping_through_solib_after_catch);
4619 }
4620
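/* A compiled-out usage sketch (not part of the original file): this is
   how the all-stop logic earlier in handle_inferior_event locates a
   thread that is still stepping or nexting, by handing
   currently_stepping_or_nexting_callback to iterate_over_threads.  The
   helper name below is hypothetical.  */
#if 0
static struct thread_info *
find_other_stepping_thread_example (struct thread_info *event_thread)
{
  /* iterate_over_threads returns the first thread for which the
     callback returns non-zero, or NULL if there is no such thread.
     Passing EVENT_THREAD as DATA makes the callback skip it.  */
  return iterate_over_threads (currently_stepping_or_nexting_callback,
                               event_thread);
}
#endif
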
4621 /* Inferior has stepped into a subroutine call with source code that
4622 we should not step over. Do step to the first line of code in
4623 it. */
4624
4625 static void
4626 handle_step_into_function (struct gdbarch *gdbarch,
4627 struct execution_control_state *ecs)
4628 {
4629 struct symtab *s;
4630 struct symtab_and_line stop_func_sal, sr_sal;
4631
4632 s = find_pc_symtab (stop_pc);
4633 if (s && s->language != language_asm)
4634 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4635 ecs->stop_func_start);
4636
4637 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4638 /* Use the step_resume_break to step until the end of the prologue,
4639 even if that involves jumps (as it seems to on the vax under
4640 4.2). */
4641 /* If the prologue ends in the middle of a source line, continue to
4642 the end of that source line (if it is still within the function).
4643 Otherwise, just go to end of prologue. */
4644 if (stop_func_sal.end
4645 && stop_func_sal.pc != ecs->stop_func_start
4646 && stop_func_sal.end < ecs->stop_func_end)
4647 ecs->stop_func_start = stop_func_sal.end;
4648
4649 /* Architectures which require breakpoint adjustment might not be able
4650 to place a breakpoint at the computed address. If so, the test
4651 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4652 ecs->stop_func_start to an address at which a breakpoint may be
4653 legitimately placed.
4654
4655 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4656 made, GDB will enter an infinite loop when stepping through
4657 optimized code consisting of VLIW instructions which contain
4658 subinstructions corresponding to different source lines. On
4659 FR-V, it's not permitted to place a breakpoint on any but the
4660 first subinstruction of a VLIW instruction. When a breakpoint is
4661 set, GDB will adjust the breakpoint address to the beginning of
4662 the VLIW instruction. Thus, we need to make the corresponding
4663 adjustment here when computing the stop address. */
4664
4665 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4666 {
4667 ecs->stop_func_start
4668 = gdbarch_adjust_breakpoint_address (gdbarch,
4669 ecs->stop_func_start);
4670 }
4671
4672 if (ecs->stop_func_start == stop_pc)
4673 {
4674 /* We are already there: stop now. */
4675 ecs->event_thread->stop_step = 1;
4676 print_stop_reason (END_STEPPING_RANGE, 0);
4677 stop_stepping (ecs);
4678 return;
4679 }
4680 else
4681 {
4682 /* Put the step-breakpoint there and go until there. */
4683 init_sal (&sr_sal); /* initialize to zeroes */
4684 sr_sal.pc = ecs->stop_func_start;
4685 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4686 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4687
4688 /* Do not specify what the fp should be when we stop since on
4689 some machines the prologue is where the new fp value is
4690 established. */
4691 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4692
4693 /* And make sure stepping stops right away then. */
4694 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4695 }
4696 keep_going (ecs);
4697 }
4698
4699 /* Inferior has stepped backward into a subroutine call with source
4700 code that we should not step over. Do step to the beginning of the
4701 last line of code in it. */
4702
4703 static void
4704 handle_step_into_function_backward (struct gdbarch *gdbarch,
4705 struct execution_control_state *ecs)
4706 {
4707 struct symtab *s;
4708 struct symtab_and_line stop_func_sal, sr_sal;
4709
4710 s = find_pc_symtab (stop_pc);
4711 if (s && s->language != language_asm)
4712 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4713 ecs->stop_func_start);
4714
4715 stop_func_sal = find_pc_line (stop_pc, 0);
4716
4717 /* OK, we're just going to keep stepping here. */
4718 if (stop_func_sal.pc == stop_pc)
4719 {
4720 /* We're there already. Just stop stepping now. */
4721 ecs->event_thread->stop_step = 1;
4722 print_stop_reason (END_STEPPING_RANGE, 0);
4723 stop_stepping (ecs);
4724 }
4725 else
4726 {
4727 /* Else just reset the step range and keep going.
4728 No step-resume breakpoint, they don't work for
4729 epilogues, which can have multiple entry paths. */
4730 ecs->event_thread->step_range_start = stop_func_sal.pc;
4731 ecs->event_thread->step_range_end = stop_func_sal.end;
4732 keep_going (ecs);
4733 }
4734 return;
4735 }
4736
4737 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4738 This is used both to step over functions and to skip over code. */
4739
4740 static void
4741 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4742 struct symtab_and_line sr_sal,
4743 struct frame_id sr_id)
4744 {
4745 /* There should never be more than one step-resume or longjmp-resume
4746 breakpoint per thread, so we should never be setting a new
4747 step_resume_breakpoint when one is already active. */
4748 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4749
4750 if (debug_infrun)
4751 fprintf_unfiltered (gdb_stdlog,
4752 "infrun: inserting step-resume breakpoint at %s\n",
4753 paddress (gdbarch, sr_sal.pc));
4754
4755 inferior_thread ()->step_resume_breakpoint
4756 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4757 }
4758
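/* A compiled-out sketch (not part of the original file) of the calling
   convention used by the step-resume call sites above: build a
   symtab_and_line, fill in the PC, overlay section and program space,
   and let the step-resume breakpoint carry the inferior to that spot.
   The wrapper name and the TARGET_PC parameter are illustrative only.  */
#if 0
static void
step_to_pc_example (struct gdbarch *gdbarch, struct frame_info *frame,
                    CORE_ADDR target_pc)
{
  struct symtab_and_line sr_sal;

  init_sal (&sr_sal);		/* initialize to zeroes */
  sr_sal.pc = target_pc;
  sr_sal.section = find_pc_overlay (sr_sal.pc);
  sr_sal.pspace = get_frame_program_space (frame);

  /* null_frame_id means the breakpoint may trigger in any frame.  */
  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
}
#endif
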
4759 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4760 to skip a potential signal handler.
4761
4762 This is called with the interrupted function's frame. The signal
4763 handler, when it returns, will resume the interrupted function at
4764 RETURN_FRAME.pc. */
4765
4766 static void
4767 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4768 {
4769 struct symtab_and_line sr_sal;
4770 struct gdbarch *gdbarch;
4771
4772 gdb_assert (return_frame != NULL);
4773 init_sal (&sr_sal); /* initialize to zeros */
4774
4775 gdbarch = get_frame_arch (return_frame);
4776 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4777 sr_sal.section = find_pc_overlay (sr_sal.pc);
4778 sr_sal.pspace = get_frame_program_space (return_frame);
4779
4780 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4781 get_stack_frame_id (return_frame));
4782 }
4783
4784 /* Similar to insert_step_resume_breakpoint_at_frame, except
4785 it sets a breakpoint at the previous frame's PC. This is used to
4786 skip a function after stepping into it (for "next" or if the called
4787 function has no debugging information).
4788
4789 The current function has almost always been reached by single
4790 stepping a call or return instruction. NEXT_FRAME belongs to the
4791 current function, and the breakpoint will be set at the caller's
4792 resume address.
4793
4794 This is a separate function rather than reusing
4795 insert_step_resume_breakpoint_at_frame in order to avoid
4796 get_prev_frame, which may stop prematurely (see the implementation
4797 of frame_unwind_caller_id for an example). */
4798
4799 static void
4800 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4801 {
4802 struct symtab_and_line sr_sal;
4803 struct gdbarch *gdbarch;
4804
4805 /* We shouldn't have gotten here if we don't know where the call site
4806 is. */
4807 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4808
4809 init_sal (&sr_sal); /* initialize to zeros */
4810
4811 gdbarch = frame_unwind_caller_arch (next_frame);
4812 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4813 frame_unwind_caller_pc (next_frame));
4814 sr_sal.section = find_pc_overlay (sr_sal.pc);
4815 sr_sal.pspace = frame_unwind_program_space (next_frame);
4816
4817 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4818 frame_unwind_caller_id (next_frame));
4819 }
4820
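/* A compiled-out sketch (not part of the original file) contrasting the
   two step-resume helpers above, mirroring how handle_inferior_event
   uses them: _at_frame breaks at the interrupted frame's own PC (where
   a signal handler will return to), while _at_caller breaks at the
   caller's resume address (to finish a "next" or skip an undebuggable
   callee).  The wrapper name and flag are illustrative only.  */
#if 0
static void
resume_after_detour_example (struct frame_info *frame, int skipping_callee)
{
  if (skipping_callee)
    /* "next", or stepped into a function without debug info: break
       where the caller will resume.  */
    insert_step_resume_breakpoint_at_caller (frame);
  else
    /* Signal-handler detour: break at the interrupted PC itself.  */
    insert_step_resume_breakpoint_at_frame (frame);
}
#endif
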
4821 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4822 new breakpoint at the target of a jmp_buf. The handling of
4823 longjmp-resume uses the same mechanisms used for handling
4824 "step-resume" breakpoints. */
4825
4826 static void
4827 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4828 {
4829 /* There should never be more than one step-resume or longjmp-resume
4830 breakpoint per thread, so we should never be setting a new
4831 longjmp_resume_breakpoint when one is already active. */
4832 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4833
4834 if (debug_infrun)
4835 fprintf_unfiltered (gdb_stdlog,
4836 "infrun: inserting longjmp-resume breakpoint at %s\n",
4837 paddress (gdbarch, pc));
4838
4839 inferior_thread ()->step_resume_breakpoint =
4840 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4841 }
4842
4843 static void
4844 stop_stepping (struct execution_control_state *ecs)
4845 {
4846 if (debug_infrun)
4847 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4848
4849 /* Let callers know we don't want to wait for the inferior anymore. */
4850 ecs->wait_some_more = 0;
4851 }
4852
4853 /* This function handles various cases where we need to continue
4854 waiting for the inferior. */
4855 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4856
4857 static void
4858 keep_going (struct execution_control_state *ecs)
4859 {
4860 /* Make sure normal_stop is called if we get a QUIT handled before
4861 reaching resume. */
4862 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
4863
4864 /* Save the pc before execution, to compare with pc after stop. */
4865 ecs->event_thread->prev_pc
4866 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4867
4868 /* If we did not do break;, it means we should keep running the
4869 inferior and not return to the debugger. */
4870
4871 if (ecs->event_thread->trap_expected
4872 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4873 {
4874 /* We took a signal (which we are supposed to pass through to
4875 the inferior, else we'd not get here) and we haven't yet
4876 gotten our trap. Simply continue. */
4877
4878 discard_cleanups (old_cleanups);
4879 resume (currently_stepping (ecs->event_thread),
4880 ecs->event_thread->stop_signal);
4881 }
4882 else
4883 {
4884 /* Either the trap was not expected, but we are continuing
4885 anyway (the user asked that this signal be passed to the
4886 child)
4887 -- or --
4888 The signal was SIGTRAP, e.g. it was our signal, but we
4889 decided we should resume from it.
4890
4891 We're going to run this baby now!
4892
4893 Note that insert_breakpoints won't try to re-insert
4894 already inserted breakpoints. Therefore, we don't
4895 care if breakpoints were already inserted, or not. */
4896
4897 if (ecs->event_thread->stepping_over_breakpoint)
4898 {
4899 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4900 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4901 /* Since we can't do a displaced step, we have to remove
4902 the breakpoint while we step it. To keep things
4903 simple, we remove them all. */
4904 remove_breakpoints ();
4905 }
4906 else
4907 {
4908 struct gdb_exception e;
4909 /* Stop stepping when inserting breakpoints
4910 has failed. */
4911 TRY_CATCH (e, RETURN_MASK_ERROR)
4912 {
4913 insert_breakpoints ();
4914 }
4915 if (e.reason < 0)
4916 {
4917 exception_print (gdb_stderr, e);
4918 stop_stepping (ecs);
4919 return;
4920 }
4921 }
4922
4923 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4924
4925 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4926 specifies that such a signal should be delivered to the
4927 target program).
4928
4929 Typically, this would occur when a user is debugging a
4930 target monitor on a simulator: the target monitor sets a
4931 breakpoint; the simulator encounters this breakpoint and
4932 halts the simulation, handing control to GDB; GDB, noting
4933 that the breakpoint isn't valid, returns control back to the
4934 simulator; the simulator then delivers the hardware
4935 equivalent of a SIGNAL_TRAP to the program being debugged. */
4936
4937 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4938 && !signal_program[ecs->event_thread->stop_signal])
4939 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4940
4941 discard_cleanups (old_cleanups);
4942 resume (currently_stepping (ecs->event_thread),
4943 ecs->event_thread->stop_signal);
4944 }
4945
4946 prepare_to_wait (ecs);
4947 }
4948
4949 /* This function normally comes after a resume, before
4950 handle_inferior_event exits. It takes care of any last bits of
4951 housekeeping, and sets the all-important wait_some_more flag. */
4952
4953 static void
4954 prepare_to_wait (struct execution_control_state *ecs)
4955 {
4956 if (debug_infrun)
4957 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4958
4959 /* This is the old end of the while loop. Let everybody know we
4960 want to wait for the inferior some more and get called again
4961 soon. */
4962 ecs->wait_some_more = 1;
4963 }
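/* Illustrative note (a paraphrase, not a quote of the real code): the
   wait_some_more flag set above is what drives the outer event loop.
   The synchronous driver, wait_for_inferior, loops roughly like

       do
         {
           ... target_wait () for one event ...
           handle_inferior_event (ecs);
         }
       while (ecs->wait_some_more);

   while the asynchronous driver, fetch_inferior_event, handles one
   event per call and only moves on to normal_stop once the flag has
   been left clear (for instance by stop_stepping above).  */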
4964
4965 /* Print why the inferior has stopped. We always print something when
4966 the inferior exits, or receives a signal. The rest of the cases are
4967 dealt with later on in normal_stop() and print_it_typical(). Ideally
4968 there should be a call to this function from handle_inferior_event()
4969 each time stop_stepping() is called. */
4970 static void
4971 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4972 {
4973 switch (stop_reason)
4974 {
4975 case END_STEPPING_RANGE:
4976 /* We are done with a step/next/si/ni command. */
4977 /* For now print nothing. */
4978 /* Print a message only if not in the middle of doing a "step n"
4979 operation for n > 1 */
4980 if (!inferior_thread ()->step_multi
4981 || !inferior_thread ()->stop_step)
4982 if (ui_out_is_mi_like_p (uiout))
4983 ui_out_field_string
4984 (uiout, "reason",
4985 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4986 break;
4987 case SIGNAL_EXITED:
4988 /* The inferior was terminated by a signal. */
4989 annotate_signalled ();
4990 if (ui_out_is_mi_like_p (uiout))
4991 ui_out_field_string
4992 (uiout, "reason",
4993 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4994 ui_out_text (uiout, "\nProgram terminated with signal ");
4995 annotate_signal_name ();
4996 ui_out_field_string (uiout, "signal-name",
4997 target_signal_to_name (stop_info));
4998 annotate_signal_name_end ();
4999 ui_out_text (uiout, ", ");
5000 annotate_signal_string ();
5001 ui_out_field_string (uiout, "signal-meaning",
5002 target_signal_to_string (stop_info));
5003 annotate_signal_string_end ();
5004 ui_out_text (uiout, ".\n");
5005 ui_out_text (uiout, "The program no longer exists.\n");
5006 break;
5007 case EXITED:
5008 /* The inferior program is finished. */
5009 annotate_exited (stop_info);
5010 if (stop_info)
5011 {
5012 if (ui_out_is_mi_like_p (uiout))
5013 ui_out_field_string (uiout, "reason",
5014 async_reason_lookup (EXEC_ASYNC_EXITED));
5015 ui_out_text (uiout, "\nProgram exited with code ");
5016 ui_out_field_fmt (uiout, "exit-code", "0%o",
5017 (unsigned int) stop_info);
5018 ui_out_text (uiout, ".\n");
5019 }
5020 else
5021 {
5022 if (ui_out_is_mi_like_p (uiout))
5023 ui_out_field_string
5024 (uiout, "reason",
5025 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5026 ui_out_text (uiout, "\nProgram exited normally.\n");
5027 }
5028 /* Support the --return-child-result option. */
5029 return_child_result_value = stop_info;
5030 break;
5031 case SIGNAL_RECEIVED:
5032 /* Signal received. The signal table tells us to print about
5033 it. */
5034 annotate_signal ();
5035
5036 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5037 {
5038 struct thread_info *t = inferior_thread ();
5039
5040 ui_out_text (uiout, "\n[");
5041 ui_out_field_string (uiout, "thread-name",
5042 target_pid_to_str (t->ptid));
5043 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5044 ui_out_text (uiout, " stopped");
5045 }
5046 else
5047 {
5048 ui_out_text (uiout, "\nProgram received signal ");
5049 annotate_signal_name ();
5050 if (ui_out_is_mi_like_p (uiout))
5051 ui_out_field_string
5052 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5053 ui_out_field_string (uiout, "signal-name",
5054 target_signal_to_name (stop_info));
5055 annotate_signal_name_end ();
5056 ui_out_text (uiout, ", ");
5057 annotate_signal_string ();
5058 ui_out_field_string (uiout, "signal-meaning",
5059 target_signal_to_string (stop_info));
5060 annotate_signal_string_end ();
5061 }
5062 ui_out_text (uiout, ".\n");
5063 break;
5064 case NO_HISTORY:
5065 /* Reverse execution: target ran out of history info. */
5066 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5067 break;
5068 default:
5069 internal_error (__FILE__, __LINE__,
5070 _("print_stop_reason: unrecognized enum value"));
5071 break;
5072 }
5073 }
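/* For reference, the CLI text assembled above comes out shaped like
   (illustrative values):

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.

     Program exited with code 01.
     Program exited normally.

     Program received signal SIGINT, Interrupt.

   MI frontends additionally get a "reason" field such as
   "end-stepping-range" or "exited-normally".  */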
5074 \f
5075
5076 /* Here to return control to GDB when the inferior stops for real.
5077 Print appropriate messages, remove breakpoints, give terminal our modes.
5078
5079 STOP_PRINT_FRAME nonzero means print the executing frame
5080 (pc, function, args, file, line number and line text).
5081 BREAKPOINTS_FAILED nonzero means stop was due to error
5082 attempting to insert breakpoints. */
5083
5084 void
5085 normal_stop (void)
5086 {
5087 struct target_waitstatus last;
5088 ptid_t last_ptid;
5089 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5090
5091 get_last_target_status (&last_ptid, &last);
5092
5093 /* If an exception is thrown from this point on, make sure to
5094 propagate GDB's knowledge of the executing state to the
5095 frontend/user running state. A QUIT is an easy exception to see
5096 here, so do this before any filtered output. */
5097 if (!non_stop)
5098 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5099 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5100 && last.kind != TARGET_WAITKIND_EXITED)
5101 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5102
5103 /* In non-stop mode, we don't want GDB to switch threads behind the
5104 user's back, to avoid races where the user is typing a command to
5105 apply to thread x, but GDB switches to thread y before the user
5106 finishes entering the command. */
5107
5108 /* As with the notification of thread events, we want to delay
5109 notifying the user that we've switched thread context until
5110 the inferior actually stops.
5111
5112 There's no point in saying anything if the inferior has exited.
5113 Note that SIGNALLED here means "exited with a signal", not
5114 "received a signal". */
5115 if (!non_stop
5116 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5117 && target_has_execution
5118 && last.kind != TARGET_WAITKIND_SIGNALLED
5119 && last.kind != TARGET_WAITKIND_EXITED)
5120 {
5121 target_terminal_ours_for_output ();
5122 printf_filtered (_("[Switching to %s]\n"),
5123 target_pid_to_str (inferior_ptid));
5124 annotate_thread_changed ();
5125 previous_inferior_ptid = inferior_ptid;
5126 }
5127
5128 if (!breakpoints_always_inserted_mode () && target_has_execution)
5129 {
5130 if (remove_breakpoints ())
5131 {
5132 target_terminal_ours_for_output ();
5133 printf_filtered (_("\
5134 Cannot remove breakpoints because program is no longer writable.\n\
5135 Further execution is probably impossible.\n"));
5136 }
5137 }
5138
5139 /* If an auto-display called a function and that got a signal,
5140 delete that auto-display to avoid an infinite recursion. */
5141
5142 if (stopped_by_random_signal)
5143 disable_current_display ();
5144
5145 /* Don't print a message if in the middle of doing a "step n"
5146 operation for n > 1 */
5147 if (target_has_execution
5148 && last.kind != TARGET_WAITKIND_SIGNALLED
5149 && last.kind != TARGET_WAITKIND_EXITED
5150 && inferior_thread ()->step_multi
5151 && inferior_thread ()->stop_step)
5152 goto done;
5153
5154 target_terminal_ours ();
5155
5156 /* Set the current source location. This will also happen if we
5157 display the frame below, but the current SAL will be incorrect
5158 during a user hook-stop function. */
5159 if (has_stack_frames () && !stop_stack_dummy)
5160 set_current_sal_from_frame (get_current_frame (), 1);
5161
5162 /* Let the user/frontend see the threads as stopped. */
5163 do_cleanups (old_chain);
5164
5165 /* Look up the hook_stop and run it (CLI internally handles problem
5166 of stop_command's pre-hook not existing). */
5167 if (stop_command)
5168 catch_errors (hook_stop_stub, stop_command,
5169 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5170
5171 if (!has_stack_frames ())
5172 goto done;
5173
5174 if (last.kind == TARGET_WAITKIND_SIGNALLED
5175 || last.kind == TARGET_WAITKIND_EXITED)
5176 goto done;
5177
5178 /* Select innermost stack frame - i.e., current frame is frame 0,
5179 and current location is based on that.
5180 Don't do this on return from a stack dummy routine,
5181 or if the program has exited. */
5182
5183 if (!stop_stack_dummy)
5184 {
5185 select_frame (get_current_frame ());
5186
5187 /* Print current location without a level number, if
5188 we have changed functions or hit a breakpoint.
5189 Print source line if we have one.
5190 bpstat_print() contains the logic deciding in detail
5191 what to print, based on the event(s) that just occurred. */
5192
5193 /* If --batch-silent is enabled then there's no need to print the current
5194 source location, and trying to print it risks causing an error message about
5195 missing source files. */
5196 if (stop_print_frame && !batch_silent)
5197 {
5198 int bpstat_ret;
5199 int source_flag;
5200 int do_frame_printing = 1;
5201 struct thread_info *tp = inferior_thread ();
5202
5203 bpstat_ret = bpstat_print (tp->stop_bpstat);
5204 switch (bpstat_ret)
5205 {
5206 case PRINT_UNKNOWN:
5207 /* If we had hit a shared library event breakpoint,
5208 bpstat_print would print out this message. If we hit
5209 an OS-level shared library event, do the same
5210 thing. */
5211 if (last.kind == TARGET_WAITKIND_LOADED)
5212 {
5213 printf_filtered (_("Stopped due to shared library event\n"));
5214 source_flag = SRC_LINE; /* something bogus */
5215 do_frame_printing = 0;
5216 break;
5217 }
5218
5219 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5220 (or should) carry around the function and does (or
5221 should) use that when doing a frame comparison. */
5222 if (tp->stop_step
5223 && frame_id_eq (tp->step_frame_id,
5224 get_frame_id (get_current_frame ()))
5225 && step_start_function == find_pc_function (stop_pc))
5226 source_flag = SRC_LINE; /* finished step, just print source line */
5227 else
5228 source_flag = SRC_AND_LOC; /* print location and source line */
5229 break;
5230 case PRINT_SRC_AND_LOC:
5231 source_flag = SRC_AND_LOC; /* print location and source line */
5232 break;
5233 case PRINT_SRC_ONLY:
5234 source_flag = SRC_LINE;
5235 break;
5236 case PRINT_NOTHING:
5237 source_flag = SRC_LINE; /* something bogus */
5238 do_frame_printing = 0;
5239 break;
5240 default:
5241 internal_error (__FILE__, __LINE__, _("Unknown value."));
5242 }
5243
5244 /* The behavior of this routine with respect to the source
5245 flag is:
5246 SRC_LINE: Print only source line
5247 LOCATION: Print only location
5248 SRC_AND_LOC: Print location and source line */
5249 if (do_frame_printing)
5250 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5251
5252 /* Display the auto-display expressions. */
5253 do_displays ();
5254 }
5255 }
5256
5257 /* Save the function value return registers, if we care.
5258 We might be about to restore their previous contents. */
5259 if (inferior_thread ()->proceed_to_finish)
5260 {
5261 /* This should not be necessary. */
5262 if (stop_registers)
5263 regcache_xfree (stop_registers);
5264
5265 /* NB: The copy goes through to the target picking up the value of
5266 all the registers. */
5267 stop_registers = regcache_dup (get_current_regcache ());
5268 }
5269
5270 if (stop_stack_dummy)
5271 {
5272 /* Pop the empty frame that contains the stack dummy.
5273 This also restores inferior state prior to the call
5274 (struct inferior_thread_state). */
5275 struct frame_info *frame = get_current_frame ();
5276 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5277 frame_pop (frame);
5278 /* frame_pop() calls reinit_frame_cache as the last thing it does
5279 which means there's currently no selected frame. We don't need
5280 to re-establish a selected frame if the dummy call returns normally,
5281 that will be done by restore_inferior_status. However, we do have
5282 to handle the case where the dummy call is returning after being
5283 stopped (e.g. the dummy call previously hit a breakpoint). We
5284 can't know which case we have so just always re-establish a
5285 selected frame here. */
5286 select_frame (get_current_frame ());
5287 }
5288
5289 done:
5290 annotate_stopped ();
5291
5292 /* Suppress the stop observer if we're in the middle of:
5293
5294 - a step n (n > 1), as there are still more steps to be done.
5295
5296 - a "finish" command, as the observer will be called in
5297 finish_command_continuation, so it can include the inferior
5298 function's return value.
5299
5300 - calling an inferior function, as we pretend the inferior didn't
5301 run at all. The return value of the call is handled by the
5302 expression evaluator, through call_function_by_hand. */
5303
5304 if (!target_has_execution
5305 || last.kind == TARGET_WAITKIND_SIGNALLED
5306 || last.kind == TARGET_WAITKIND_EXITED
5307 || (!inferior_thread ()->step_multi
5308 && !(inferior_thread ()->stop_bpstat
5309 && inferior_thread ()->proceed_to_finish)
5310 && !inferior_thread ()->in_infcall))
5311 {
5312 if (!ptid_equal (inferior_ptid, null_ptid))
5313 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5314 stop_print_frame);
5315 else
5316 observer_notify_normal_stop (NULL, stop_print_frame);
5317 }
5318
5319 if (target_has_execution)
5320 {
5321 if (last.kind != TARGET_WAITKIND_SIGNALLED
5322 && last.kind != TARGET_WAITKIND_EXITED)
5323 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5324 Delete any breakpoint that is to be deleted at the next stop. */
5325 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5326 }
5327
5328 /* Try to get rid of automatically added inferiors that are no
5329 longer needed. Keeping those around slows down things linearly.
5330 Note that this never removes the current inferior. */
5331 prune_inferiors ();
5332 }
5333
5334 static int
5335 hook_stop_stub (void *cmd)
5336 {
5337 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5338 return (0);
5339 }
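/* Example (user level, for illustration only): the pre-hook executed by
   hook_stop_stub is the CLI "hook-stop", which a user can define with

       define hook-stop
         info registers
       end

   so that the commands run on every normal stop, just before
   normal_stop above prints the stopped frame.  */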
5340 \f
5341 int
5342 signal_stop_state (int signo)
5343 {
5344 return signal_stop[signo];
5345 }
5346
5347 int
5348 signal_print_state (int signo)
5349 {
5350 return signal_print[signo];
5351 }
5352
5353 int
5354 signal_pass_state (int signo)
5355 {
5356 return signal_program[signo];
5357 }
5358
5359 int
5360 signal_stop_update (int signo, int state)
5361 {
5362 int ret = signal_stop[signo];
5363 signal_stop[signo] = state;
5364 return ret;
5365 }
5366
5367 int
5368 signal_print_update (int signo, int state)
5369 {
5370 int ret = signal_print[signo];
5371 signal_print[signo] = state;
5372 return ret;
5373 }
5374
5375 int
5376 signal_pass_update (int signo, int state)
5377 {
5378 int ret = signal_program[signo];
5379 signal_program[signo] = state;
5380 return ret;
5381 }
5382
5383 static void
5384 sig_print_header (void)
5385 {
5386 printf_filtered (_("\
5387 Signal Stop\tPrint\tPass to program\tDescription\n"));
5388 }
5389
5390 static void
5391 sig_print_info (enum target_signal oursig)
5392 {
5393 const char *name = target_signal_to_name (oursig);
5394 int name_padding = 13 - strlen (name);
5395
5396 if (name_padding <= 0)
5397 name_padding = 0;
5398
5399 printf_filtered ("%s", name);
5400 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5401 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5402 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5403 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5404 printf_filtered ("%s\n", target_signal_to_string (oursig));
5405 }
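/* For illustration, with the defaults installed by _initialize_infrun
   below, "info signals" (signals_info) prints rows shaped like:

     Signal        Stop      Print   Pass to program  Description
     SIGINT        Yes       Yes     No               Interrupt
     SIGALRM       No        No      Yes              Alarm clock

   i.e. the signal name padded to 13 columns, the three Yes/No flag
   columns, then the description from target_signal_to_string.  */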
5406
5407 /* Specify how various signals in the inferior should be handled. */
5408
5409 static void
5410 handle_command (char *args, int from_tty)
5411 {
5412 char **argv;
5413 int digits, wordlen;
5414 int sigfirst, signum, siglast;
5415 enum target_signal oursig;
5416 int allsigs;
5417 int nsigs;
5418 unsigned char *sigs;
5419 struct cleanup *old_chain;
5420
5421 if (args == NULL)
5422 {
5423 error_no_arg (_("signal to handle"));
5424 }
5425
5426 /* Allocate and zero an array of flags for which signals to handle. */
5427
5428 nsigs = (int) TARGET_SIGNAL_LAST;
5429 sigs = (unsigned char *) alloca (nsigs);
5430 memset (sigs, 0, nsigs);
5431
5432 /* Break the command line up into args. */
5433
5434 argv = gdb_buildargv (args);
5435 old_chain = make_cleanup_freeargv (argv);
5436
5437 /* Walk through the args, looking for signal oursigs, signal names, and
5438 actions. Signal numbers and signal names may be interspersed with
5439 actions, with the actions being performed for all signals cumulatively
5440 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5441
5442 while (*argv != NULL)
5443 {
5444 wordlen = strlen (*argv);
5445 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5446 {;
5447 }
5448 allsigs = 0;
5449 sigfirst = siglast = -1;
5450
5451 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5452 {
5453 /* Apply action to all signals except those used by the
5454 debugger. Silently skip those. */
5455 allsigs = 1;
5456 sigfirst = 0;
5457 siglast = nsigs - 1;
5458 }
5459 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5460 {
5461 SET_SIGS (nsigs, sigs, signal_stop);
5462 SET_SIGS (nsigs, sigs, signal_print);
5463 }
5464 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5465 {
5466 UNSET_SIGS (nsigs, sigs, signal_program);
5467 }
5468 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5469 {
5470 SET_SIGS (nsigs, sigs, signal_print);
5471 }
5472 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5473 {
5474 SET_SIGS (nsigs, sigs, signal_program);
5475 }
5476 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5477 {
5478 UNSET_SIGS (nsigs, sigs, signal_stop);
5479 }
5480 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5481 {
5482 SET_SIGS (nsigs, sigs, signal_program);
5483 }
5484 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5485 {
5486 UNSET_SIGS (nsigs, sigs, signal_print);
5487 UNSET_SIGS (nsigs, sigs, signal_stop);
5488 }
5489 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5490 {
5491 UNSET_SIGS (nsigs, sigs, signal_program);
5492 }
5493 else if (digits > 0)
5494 {
5495 /* It is numeric. The numeric signal refers to our own
5496 internal signal numbering from target.h, not to host/target
5497 signal number. This is a feature; users really should be
5498 using symbolic names anyway, and the common ones like
5499 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5500
5501 sigfirst = siglast = (int)
5502 target_signal_from_command (atoi (*argv));
5503 if ((*argv)[digits] == '-')
5504 {
5505 siglast = (int)
5506 target_signal_from_command (atoi ((*argv) + digits + 1));
5507 }
5508 if (sigfirst > siglast)
5509 {
5510 /* Bet he didn't figure we'd think of this case... */
5511 signum = sigfirst;
5512 sigfirst = siglast;
5513 siglast = signum;
5514 }
5515 }
5516 else
5517 {
5518 oursig = target_signal_from_name (*argv);
5519 if (oursig != TARGET_SIGNAL_UNKNOWN)
5520 {
5521 sigfirst = siglast = (int) oursig;
5522 }
5523 else
5524 {
5525 /* Not a number and not a recognized flag word => complain. */
5526 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5527 }
5528 }
5529
5530 /* If any signal numbers or symbol names were found, set flags for
5531 which signals to apply actions to. */
5532
5533 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5534 {
5535 switch ((enum target_signal) signum)
5536 {
5537 case TARGET_SIGNAL_TRAP:
5538 case TARGET_SIGNAL_INT:
5539 if (!allsigs && !sigs[signum])
5540 {
5541 if (query (_("%s is used by the debugger.\n\
5542 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5543 {
5544 sigs[signum] = 1;
5545 }
5546 else
5547 {
5548 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5549 gdb_flush (gdb_stdout);
5550 }
5551 }
5552 break;
5553 case TARGET_SIGNAL_0:
5554 case TARGET_SIGNAL_DEFAULT:
5555 case TARGET_SIGNAL_UNKNOWN:
5556 /* Make sure that "all" doesn't print these. */
5557 break;
5558 default:
5559 sigs[signum] = 1;
5560 break;
5561 }
5562 }
5563
5564 argv++;
5565 }
5566
5567 for (signum = 0; signum < nsigs; signum++)
5568 if (sigs[signum])
5569 {
5570 target_notice_signals (inferior_ptid);
5571
5572 if (from_tty)
5573 {
5574 /* Show the results. */
5575 sig_print_header ();
5576 for (; signum < nsigs; signum++)
5577 if (sigs[signum])
5578 sig_print_info (signum);
5579 }
5580
5581 break;
5582 }
5583
5584 do_cleanups (old_chain);
5585 }
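/* Usage examples for the command implemented above (illustrative):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGALRM stop print
     (gdb) handle 14-15 nopass
     (gdb) handle all print

   Actions accumulate over the signals named so far on the line; numeric
   arguments use GDB's own signal numbering (see the comment in the
   numeric branch above); "all" silently skips the signals used by the
   debugger itself (SIGTRAP, SIGINT), while naming those explicitly
   prompts for confirmation.  */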
5586
5587 static void
5588 xdb_handle_command (char *args, int from_tty)
5589 {
5590 char **argv;
5591 struct cleanup *old_chain;
5592
5593 if (args == NULL)
5594 error_no_arg (_("xdb command"));
5595
5596 /* Break the command line up into args. */
5597
5598 argv = gdb_buildargv (args);
5599 old_chain = make_cleanup_freeargv (argv);
5600 if (argv[1] != (char *) NULL)
5601 {
5602 char *argBuf;
5603 int bufLen;
5604
5605 bufLen = strlen (argv[0]) + 20;
5606 argBuf = (char *) xmalloc (bufLen);
5607 if (argBuf)
5608 {
5609 int validFlag = 1;
5610 enum target_signal oursig;
5611
5612 oursig = target_signal_from_name (argv[0]);
5613 memset (argBuf, 0, bufLen);
5614 if (strcmp (argv[1], "Q") == 0)
5615 sprintf (argBuf, "%s %s", argv[0], "noprint");
5616 else
5617 {
5618 if (strcmp (argv[1], "s") == 0)
5619 {
5620 if (!signal_stop[oursig])
5621 sprintf (argBuf, "%s %s", argv[0], "stop");
5622 else
5623 sprintf (argBuf, "%s %s", argv[0], "nostop");
5624 }
5625 else if (strcmp (argv[1], "i") == 0)
5626 {
5627 if (!signal_program[oursig])
5628 sprintf (argBuf, "%s %s", argv[0], "pass");
5629 else
5630 sprintf (argBuf, "%s %s", argv[0], "nopass");
5631 }
5632 else if (strcmp (argv[1], "r") == 0)
5633 {
5634 if (!signal_print[oursig])
5635 sprintf (argBuf, "%s %s", argv[0], "print");
5636 else
5637 sprintf (argBuf, "%s %s", argv[0], "noprint");
5638 }
5639 else
5640 validFlag = 0;
5641 }
5642 if (validFlag)
5643 handle_command (argBuf, from_tty);
5644 else
5645 printf_filtered (_("Invalid signal handling flag.\n"));
5646 if (argBuf)
5647 xfree (argBuf);
5648 }
5649 }
5650 do_cleanups (old_chain);
5651 }
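/* Example (illustrative): under the XDB compatibility layer above,
   "z SIGINT s" toggles SIGINT between stop and nostop, "z SIGINT r"
   toggles print/noprint, "z SIGINT i" toggles pass/nopass, and
   "z SIGINT Q" forces noprint, each by rewriting the request into the
   corresponding "handle" command.  */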
5652
5653 /* Print current contents of the tables set by the handle command.
5654 It is possible we should just be printing signals actually used
5655 by the current target (but for things to work right when switching
5656 targets, all signals should be in the signal tables). */
5657
5658 static void
5659 signals_info (char *signum_exp, int from_tty)
5660 {
5661 enum target_signal oursig;
5662 sig_print_header ();
5663
5664 if (signum_exp)
5665 {
5666 /* First see if this is a symbol name. */
5667 oursig = target_signal_from_name (signum_exp);
5668 if (oursig == TARGET_SIGNAL_UNKNOWN)
5669 {
5670 /* No, try numeric. */
5671 oursig =
5672 target_signal_from_command (parse_and_eval_long (signum_exp));
5673 }
5674 sig_print_info (oursig);
5675 return;
5676 }
5677
5678 printf_filtered ("\n");
5679 /* These ugly casts brought to you by the native VAX compiler. */
5680 for (oursig = TARGET_SIGNAL_FIRST;
5681 (int) oursig < (int) TARGET_SIGNAL_LAST;
5682 oursig = (enum target_signal) ((int) oursig + 1))
5683 {
5684 QUIT;
5685
5686 if (oursig != TARGET_SIGNAL_UNKNOWN
5687 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5688 sig_print_info (oursig);
5689 }
5690
5691 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5692 }
5693
5694 /* The $_siginfo convenience variable is a bit special. We don't know
5695 for sure the type of the value until we actually have a chance to
5696 fetch the data. The type can change depending on gdbarch, so it is
5697 also dependent on which thread you have selected. This is handled by:
5698
5699 1. making $_siginfo be an internalvar that creates a new value on
5700 access.
5701
5702 2. making the value of $_siginfo be an lval_computed value. */
5703
5704 /* This function implements the lval_computed support for reading a
5705 $_siginfo value. */
5706
5707 static void
5708 siginfo_value_read (struct value *v)
5709 {
5710 LONGEST transferred;
5711
5712 transferred =
5713 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5714 NULL,
5715 value_contents_all_raw (v),
5716 value_offset (v),
5717 TYPE_LENGTH (value_type (v)));
5718
5719 if (transferred != TYPE_LENGTH (value_type (v)))
5720 error (_("Unable to read siginfo"));
5721 }
5722
5723 /* This function implements the lval_computed support for writing a
5724 $_siginfo value. */
5725
5726 static void
5727 siginfo_value_write (struct value *v, struct value *fromval)
5728 {
5729 LONGEST transferred;
5730
5731 transferred = target_write (&current_target,
5732 TARGET_OBJECT_SIGNAL_INFO,
5733 NULL,
5734 value_contents_all_raw (fromval),
5735 value_offset (v),
5736 TYPE_LENGTH (value_type (fromval)));
5737
5738 if (transferred != TYPE_LENGTH (value_type (fromval)))
5739 error (_("Unable to write siginfo"));
5740 }
5741
5742 static struct lval_funcs siginfo_value_funcs =
5743 {
5744 siginfo_value_read,
5745 siginfo_value_write
5746 };
5747
5748 /* Return a new value with the correct type for the siginfo object of
5749 the current thread using architecture GDBARCH. Return a void value
5750 if there's no object available. */
5751
5752 static struct value *
5753 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5754 {
5755 if (target_has_stack
5756 && !ptid_equal (inferior_ptid, null_ptid)
5757 && gdbarch_get_siginfo_type_p (gdbarch))
5758 {
5759 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5760 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5761 }
5762
5763 return allocate_value (builtin_type (gdbarch)->builtin_void);
5764 }
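/* Usage example (illustrative; requires a target/gdbarch that provides
   a siginfo type, e.g. GNU/Linux native):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   Reads go through siginfo_value_read above; assigning to a field goes
   through siginfo_value_write.  */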
5765
5766 \f
5767 /* Inferior thread state.
5768 These are details related to the inferior itself, and don't include
5769 things like what frame the user had selected or what gdb was doing
5770 with the target at the time.
5771 For inferior function calls these are things we want to restore
5772 regardless of whether the function call successfully completes
5773 or the dummy frame has to be manually popped. */
5774
5775 struct inferior_thread_state
5776 {
5777 enum target_signal stop_signal;
5778 CORE_ADDR stop_pc;
5779 struct regcache *registers;
5780 };
5781
5782 struct inferior_thread_state *
5783 save_inferior_thread_state (void)
5784 {
5785 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5786 struct thread_info *tp = inferior_thread ();
5787
5788 inf_state->stop_signal = tp->stop_signal;
5789 inf_state->stop_pc = stop_pc;
5790
5791 inf_state->registers = regcache_dup (get_current_regcache ());
5792
5793 return inf_state;
5794 }
5795
5796 /* Restore inferior session state to INF_STATE. */
5797
5798 void
5799 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5800 {
5801 struct thread_info *tp = inferior_thread ();
5802
5803 tp->stop_signal = inf_state->stop_signal;
5804 stop_pc = inf_state->stop_pc;
5805
5806 /* The inferior can be gone if the user types "print exit(0)"
5807 (and perhaps other times). */
5808 if (target_has_execution)
5809 /* NB: The register write goes through to the target. */
5810 regcache_cpy (get_current_regcache (), inf_state->registers);
5811 regcache_xfree (inf_state->registers);
5812 xfree (inf_state);
5813 }
5814
5815 static void
5816 do_restore_inferior_thread_state_cleanup (void *state)
5817 {
5818 restore_inferior_thread_state (state);
5819 }
5820
5821 struct cleanup *
5822 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5823 {
5824 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5825 }
5826
5827 void
5828 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5829 {
5830 regcache_xfree (inf_state->registers);
5831 xfree (inf_state);
5832 }
5833
5834 struct regcache *
5835 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5836 {
5837 return inf_state->registers;
5838 }
5839
5840 /* Session related state for inferior function calls.
5841 These are the additional bits of state that need to be restored
5842 when an inferior function call successfully completes. */
5843
5844 struct inferior_status
5845 {
5846 bpstat stop_bpstat;
5847 int stop_step;
5848 int stop_stack_dummy;
5849 int stopped_by_random_signal;
5850 int stepping_over_breakpoint;
5851 CORE_ADDR step_range_start;
5852 CORE_ADDR step_range_end;
5853 struct frame_id step_frame_id;
5854 struct frame_id step_stack_frame_id;
5855 enum step_over_calls_kind step_over_calls;
5856 CORE_ADDR step_resume_break_address;
5857 int stop_after_trap;
5858 int stop_soon;
5859
5860 /* ID of the selected frame when the inferior function call was made. */
5861 struct frame_id selected_frame_id;
5862
5863 int proceed_to_finish;
5864 int in_infcall;
5865 };
5866
5867 /* Save all of the information associated with the inferior<==>gdb
5868 connection. */
5869
5870 struct inferior_status *
5871 save_inferior_status (void)
5872 {
5873 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5874 struct thread_info *tp = inferior_thread ();
5875 struct inferior *inf = current_inferior ();
5876
5877 inf_status->stop_step = tp->stop_step;
5878 inf_status->stop_stack_dummy = stop_stack_dummy;
5879 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5880 inf_status->stepping_over_breakpoint = tp->trap_expected;
5881 inf_status->step_range_start = tp->step_range_start;
5882 inf_status->step_range_end = tp->step_range_end;
5883 inf_status->step_frame_id = tp->step_frame_id;
5884 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5885 inf_status->step_over_calls = tp->step_over_calls;
5886 inf_status->stop_after_trap = stop_after_trap;
5887 inf_status->stop_soon = inf->stop_soon;
5888 /* Save original bpstat chain here; replace it with copy of chain.
5889 If caller's caller is walking the chain, they'll be happier if we
5890 hand them back the original chain when restore_inferior_status is
5891 called. */
5892 inf_status->stop_bpstat = tp->stop_bpstat;
5893 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5894 inf_status->proceed_to_finish = tp->proceed_to_finish;
5895 inf_status->in_infcall = tp->in_infcall;
5896
5897 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5898
5899 return inf_status;
5900 }
5901
5902 static int
5903 restore_selected_frame (void *args)
5904 {
5905 struct frame_id *fid = (struct frame_id *) args;
5906 struct frame_info *frame;
5907
5908 frame = frame_find_by_id (*fid);
5909
5910 /* frame_find_by_id () returns NULL if it cannot find the previously
5911 selected frame (e.g. the stack was clobbered); warn and give up. */
5912 if (frame == NULL)
5913 {
5914 warning (_("Unable to restore previously selected frame."));
5915 return 0;
5916 }
5917
5918 select_frame (frame);
5919
5920 return (1);
5921 }
5922
5923 /* Restore inferior session state to INF_STATUS. */
5924
5925 void
5926 restore_inferior_status (struct inferior_status *inf_status)
5927 {
5928 struct thread_info *tp = inferior_thread ();
5929 struct inferior *inf = current_inferior ();
5930
5931 tp->stop_step = inf_status->stop_step;
5932 stop_stack_dummy = inf_status->stop_stack_dummy;
5933 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5934 tp->trap_expected = inf_status->stepping_over_breakpoint;
5935 tp->step_range_start = inf_status->step_range_start;
5936 tp->step_range_end = inf_status->step_range_end;
5937 tp->step_frame_id = inf_status->step_frame_id;
5938 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5939 tp->step_over_calls = inf_status->step_over_calls;
5940 stop_after_trap = inf_status->stop_after_trap;
5941 inf->stop_soon = inf_status->stop_soon;
5942 bpstat_clear (&tp->stop_bpstat);
5943 tp->stop_bpstat = inf_status->stop_bpstat;
5944 inf_status->stop_bpstat = NULL;
5945 tp->proceed_to_finish = inf_status->proceed_to_finish;
5946 tp->in_infcall = inf_status->in_infcall;
5947
5948 if (target_has_stack)
5949 {
5950 /* The point of catch_errors is that if the stack is clobbered,
5951 walking the stack might encounter a garbage pointer and
5952 error() trying to dereference it. */
5953 if (catch_errors
5954 (restore_selected_frame, &inf_status->selected_frame_id,
5955 "Unable to restore previously selected frame:\n",
5956 RETURN_MASK_ERROR) == 0)
5957 /* Error in restoring the selected frame. Select the innermost
5958 frame. */
5959 select_frame (get_current_frame ());
5960 }
5961
5962 xfree (inf_status);
5963 }
5964
5965 static void
5966 do_restore_inferior_status_cleanup (void *sts)
5967 {
5968 restore_inferior_status (sts);
5969 }
5970
5971 struct cleanup *
5972 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5973 {
5974 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5975 }
5976
5977 void
5978 discard_inferior_status (struct inferior_status *inf_status)
5979 {
5980 /* See save_inferior_status for info on stop_bpstat. */
5981 bpstat_clear (&inf_status->stop_bpstat);
5982 xfree (inf_status);
5983 }
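/* Illustrative sketch of how the helpers above are meant to be paired
   by an inferior-call site.  This is a hypothetical example function,
   not code from this file, and it deliberately glosses over the actual
   dummy-frame machinery.  */
#if 0
static void
example_call_with_saved_status (void)
{
  struct inferior_status *inf_status = save_inferior_status ();
  struct cleanup *chain = make_cleanup_restore_inferior_status (inf_status);

  /* ... let the inferior run a dummy-frame call here; an error unwinds
     through the cleanup, and restore_inferior_status puts the pre-call
     state back ... */

  /* On the normal path, either run the cleanup to restore the saved
     state, or (as here) drop it and keep the current state, releasing
     the snapshot explicitly.  */
  discard_cleanups (chain);
  discard_inferior_status (inf_status);
}
#endif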
5984 \f
5985 int
5986 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5987 {
5988 struct target_waitstatus last;
5989 ptid_t last_ptid;
5990
5991 get_last_target_status (&last_ptid, &last);
5992
5993 if (last.kind != TARGET_WAITKIND_FORKED)
5994 return 0;
5995
5996 if (!ptid_equal (last_ptid, pid))
5997 return 0;
5998
5999 *child_pid = last.value.related_pid;
6000 return 1;
6001 }
6002
6003 int
6004 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6005 {
6006 struct target_waitstatus last;
6007 ptid_t last_ptid;
6008
6009 get_last_target_status (&last_ptid, &last);
6010
6011 if (last.kind != TARGET_WAITKIND_VFORKED)
6012 return 0;
6013
6014 if (!ptid_equal (last_ptid, pid))
6015 return 0;
6016
6017 *child_pid = last.value.related_pid;
6018 return 1;
6019 }
6020
6021 int
6022 inferior_has_execd (ptid_t pid, char **execd_pathname)
6023 {
6024 struct target_waitstatus last;
6025 ptid_t last_ptid;
6026
6027 get_last_target_status (&last_ptid, &last);
6028
6029 if (last.kind != TARGET_WAITKIND_EXECD)
6030 return 0;
6031
6032 if (!ptid_equal (last_ptid, pid))
6033 return 0;
6034
6035 *execd_pathname = xstrdup (last.value.execd_pathname);
6036 return 1;
6037 }
6038
6039 int
6040 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6041 {
6042 struct target_waitstatus last;
6043 ptid_t last_ptid;
6044
6045 get_last_target_status (&last_ptid, &last);
6046
6047 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6048 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6049 return 0;
6050
6051 if (!ptid_equal (last_ptid, pid))
6052 return 0;
6053
6054 *syscall_number = last.value.syscall_number;
6055 return 1;
6056 }
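/* Illustrative sketch (hypothetical caller, not code from this file) of
   how the inferior_has_* queries above are meant to be used after a
   stop.  */
#if 0
static void
example_report_fork (void)
{
  ptid_t child;

  if (inferior_has_forked (inferior_ptid, &child))
    printf_filtered (_("last event was a fork; child is %s\n"),
                     target_pid_to_str (child));
}
#endif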
6057
6058 /* Oft used ptids */
6059 ptid_t null_ptid;
6060 ptid_t minus_one_ptid;
6061
6062 /* Create a ptid given the necessary PID, LWP, and TID components. */
6063
6064 ptid_t
6065 ptid_build (int pid, long lwp, long tid)
6066 {
6067 ptid_t ptid;
6068
6069 ptid.pid = pid;
6070 ptid.lwp = lwp;
6071 ptid.tid = tid;
6072 return ptid;
6073 }
6074
6075 /* Create a ptid from just a pid. */
6076
6077 ptid_t
6078 pid_to_ptid (int pid)
6079 {
6080 return ptid_build (pid, 0, 0);
6081 }
6082
6083 /* Fetch the pid (process id) component from a ptid. */
6084
6085 int
6086 ptid_get_pid (ptid_t ptid)
6087 {
6088 return ptid.pid;
6089 }
6090
6091 /* Fetch the lwp (lightweight process) component from a ptid. */
6092
6093 long
6094 ptid_get_lwp (ptid_t ptid)
6095 {
6096 return ptid.lwp;
6097 }
6098
6099 /* Fetch the tid (thread id) component from a ptid. */
6100
6101 long
6102 ptid_get_tid (ptid_t ptid)
6103 {
6104 return ptid.tid;
6105 }
6106
6107 /* ptid_equal() is used to test equality of two ptids. */
6108
6109 int
6110 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6111 {
6112 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6113 && ptid1.tid == ptid2.tid);
6114 }
6115
6116 /* Returns true if PTID represents a process. */
6117
6118 int
6119 ptid_is_pid (ptid_t ptid)
6120 {
6121 if (ptid_equal (minus_one_ptid, ptid))
6122 return 0;
6123 if (ptid_equal (null_ptid, ptid))
6124 return 0;
6125
6126 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6127 }
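/* Illustrative example of how the ptid constructors and accessors above
   fit together.  This is a hypothetical function with made-up values,
   not part of GDB.  */
#if 0
static void
example_ptid_usage (void)
{
  ptid_t thr = ptid_build (1234, 0, 7);	/* thread 7 of process 1234 */

  gdb_assert (ptid_get_pid (thr) == 1234);
  gdb_assert (ptid_get_lwp (thr) == 0);
  gdb_assert (ptid_get_tid (thr) == 7);
  gdb_assert (!ptid_is_pid (thr));	/* it names a thread, not a process */
  gdb_assert (ptid_is_pid (pid_to_ptid (1234)));
  gdb_assert (ptid_equal (pid_to_ptid (1234), ptid_build (1234, 0, 0)));
}
#endif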
6128
6129 /* restore_inferior_ptid() will be used by the cleanup machinery
6130 to restore the inferior_ptid value saved in a call to
6131 save_inferior_ptid(). */
6132
6133 static void
6134 restore_inferior_ptid (void *arg)
6135 {
6136 ptid_t *saved_ptid_ptr = arg;
6137 inferior_ptid = *saved_ptid_ptr;
6138 xfree (arg);
6139 }
6140
6141 /* Save the value of inferior_ptid so that it may be restored by a
6142 later call to do_cleanups(). Returns the struct cleanup pointer
6143 needed for later doing the cleanup. */
6144
6145 struct cleanup *
6146 save_inferior_ptid (void)
6147 {
6148 ptid_t *saved_ptid_ptr;
6149
6150 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6151 *saved_ptid_ptr = inferior_ptid;
6152 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6153 }
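/* Illustrative sketch (hypothetical helper, not code from this file) of
   the usual save/switch/restore pattern for inferior_ptid using the
   pair of functions above.  */
#if 0
static void
example_with_switched_ptid (ptid_t ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = ptid;
  /* ... operate on PTID; an error unwinds through the cleanup ... */

  do_cleanups (old_chain);	/* restores the previous inferior_ptid */
}
#endif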
6154 \f
6155
6156 /* User interface for reverse debugging:
6157 Set exec-direction / show exec-direction commands
6158 (the new value is ignored unless the target can execute in reverse). */
6159
6160 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6161 static const char exec_forward[] = "forward";
6162 static const char exec_reverse[] = "reverse";
6163 static const char *exec_direction = exec_forward;
6164 static const char *exec_direction_names[] = {
6165 exec_forward,
6166 exec_reverse,
6167 NULL
6168 };
6169
6170 static void
6171 set_exec_direction_func (char *args, int from_tty,
6172 struct cmd_list_element *cmd)
6173 {
6174 if (target_can_execute_reverse)
6175 {
6176 if (!strcmp (exec_direction, exec_forward))
6177 execution_direction = EXEC_FORWARD;
6178 else if (!strcmp (exec_direction, exec_reverse))
6179 execution_direction = EXEC_REVERSE;
6180 }
6181 }
6182
6183 static void
6184 show_exec_direction_func (struct ui_file *out, int from_tty,
6185 struct cmd_list_element *cmd, const char *value)
6186 {
6187 switch (execution_direction) {
6188 case EXEC_FORWARD:
6189 fprintf_filtered (out, _("Forward.\n"));
6190 break;
6191 case EXEC_REVERSE:
6192 fprintf_filtered (out, _("Reverse.\n"));
6193 break;
6194 case EXEC_ERROR:
6195 default:
6196 fprintf_filtered (out,
6197 _("Forward (target `%s' does not support exec-direction).\n"),
6198 target_shortname);
6199 break;
6200 }
6201 }
6202
6203 /* User interface for non-stop mode. */
6204
6205 int non_stop = 0;
6206 static int non_stop_1 = 0;
6207
6208 static void
6209 set_non_stop (char *args, int from_tty,
6210 struct cmd_list_element *c)
6211 {
6212 if (target_has_execution)
6213 {
6214 non_stop_1 = non_stop;
6215 error (_("Cannot change this setting while the inferior is running."));
6216 }
6217
6218 non_stop = non_stop_1;
6219 }
6220
6221 static void
6222 show_non_stop (struct ui_file *file, int from_tty,
6223 struct cmd_list_element *c, const char *value)
6224 {
6225 fprintf_filtered (file,
6226 _("Controlling the inferior in non-stop mode is %s.\n"),
6227 value);
6228 }
6229
6230 static void
6231 show_schedule_multiple (struct ui_file *file, int from_tty,
6232 struct cmd_list_element *c, const char *value)
6233 {
6234 fprintf_filtered (file, _("\
6235 Resuming the execution of threads of all processes is %s.\n"), value);
6236 }
6237
6238 void
6239 _initialize_infrun (void)
6240 {
6241 int i;
6242 int numsigs;
6243 struct cmd_list_element *c;
6244
6245 add_info ("signals", signals_info, _("\
6246 What debugger does when program gets various signals.\n\
6247 Specify a signal as argument to print info on that signal only."));
6248 add_info_alias ("handle", "signals", 0);
6249
6250 add_com ("handle", class_run, handle_command, _("\
6251 Specify how to handle a signal.\n\
6252 Args are signals and actions to apply to those signals.\n\
6253 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6254 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6255 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6256 The special arg \"all\" is recognized to mean all signals except those\n\
6257 used by the debugger, typically SIGTRAP and SIGINT.\n\
6258 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6259 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6260 Stop means reenter debugger if this signal happens (implies print).\n\
6261 Print means print a message if this signal happens.\n\
6262 Pass means let program see this signal; otherwise program doesn't know.\n\
6263 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6264 Pass and Stop may be combined."));
6265 if (xdb_commands)
6266 {
6267 add_com ("lz", class_info, signals_info, _("\
6268 What debugger does when program gets various signals.\n\
6269 Specify a signal as argument to print info on that signal only."));
6270 add_com ("z", class_run, xdb_handle_command, _("\
6271 Specify how to handle a signal.\n\
6272 Args are signals and actions to apply to those signals.\n\
6273 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6274 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6275 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6276 The special arg \"all\" is recognized to mean all signals except those\n\
6277 used by the debugger, typically SIGTRAP and SIGINT.\n\
6278 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6279 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6280 nopass), \"Q\" (noprint)\n\
6281 Stop means reenter debugger if this signal happens (implies print).\n\
6282 Print means print a message if this signal happens.\n\
6283 Pass means let program see this signal; otherwise program doesn't know.\n\
6284 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6285 Pass and Stop may be combined."));
6286 }
6287
6288 if (!dbx_commands)
6289 stop_command = add_cmd ("stop", class_obscure,
6290 not_just_help_class_command, _("\
6291 There is no `stop' command, but you can set a hook on `stop'.\n\
6292 This allows you to set a list of commands to be run each time execution\n\
6293 of the program stops."), &cmdlist);
6294
6295 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6296 Set inferior debugging."), _("\
6297 Show inferior debugging."), _("\
6298 When non-zero, inferior specific debugging is enabled."),
6299 NULL,
6300 show_debug_infrun,
6301 &setdebuglist, &showdebuglist);
6302
6303 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6304 Set displaced stepping debugging."), _("\
6305 Show displaced stepping debugging."), _("\
6306 When non-zero, displaced stepping specific debugging is enabled."),
6307 NULL,
6308 show_debug_displaced,
6309 &setdebuglist, &showdebuglist);
6310
6311 add_setshow_boolean_cmd ("non-stop", no_class,
6312 &non_stop_1, _("\
6313 Set whether gdb controls the inferior in non-stop mode."), _("\
6314 Show whether gdb controls the inferior in non-stop mode."), _("\
6315 When debugging a multi-threaded program and this setting is\n\
6316 off (the default, also called all-stop mode), when one thread stops\n\
6317 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6318 all other threads in the program while you interact with the thread of\n\
6319 interest. When you continue or step a thread, you can allow the other\n\
6320 threads to run, or have them remain stopped, but while you inspect any\n\
6321 thread's state, all threads stop.\n\
6322 \n\
6323 In non-stop mode, when one thread stops, other threads can continue\n\
6324 to run freely. You'll be able to step each thread independently,\n\
6325 leave it stopped or free to run as needed."),
6326 set_non_stop,
6327 show_non_stop,
6328 &setlist,
6329 &showlist);
6330
6331 numsigs = (int) TARGET_SIGNAL_LAST;
6332 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6333 signal_print = (unsigned char *)
6334 xmalloc (sizeof (signal_print[0]) * numsigs);
6335 signal_program = (unsigned char *)
6336 xmalloc (sizeof (signal_program[0]) * numsigs);
6337 for (i = 0; i < numsigs; i++)
6338 {
6339 signal_stop[i] = 1;
6340 signal_print[i] = 1;
6341 signal_program[i] = 1;
6342 }
6343
6344 /* Signals caused by debugger's own actions
6345 should not be given to the program afterwards. */
6346 signal_program[TARGET_SIGNAL_TRAP] = 0;
6347 signal_program[TARGET_SIGNAL_INT] = 0;
6348
6349 /* Signals that are not errors should not normally enter the debugger. */
6350 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6351 signal_print[TARGET_SIGNAL_ALRM] = 0;
6352 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6353 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6354 signal_stop[TARGET_SIGNAL_PROF] = 0;
6355 signal_print[TARGET_SIGNAL_PROF] = 0;
6356 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6357 signal_print[TARGET_SIGNAL_CHLD] = 0;
6358 signal_stop[TARGET_SIGNAL_IO] = 0;
6359 signal_print[TARGET_SIGNAL_IO] = 0;
6360 signal_stop[TARGET_SIGNAL_POLL] = 0;
6361 signal_print[TARGET_SIGNAL_POLL] = 0;
6362 signal_stop[TARGET_SIGNAL_URG] = 0;
6363 signal_print[TARGET_SIGNAL_URG] = 0;
6364 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6365 signal_print[TARGET_SIGNAL_WINCH] = 0;
6366
6367 /* These signals are used internally by user-level thread
6368 implementations. (See signal(5) on Solaris.) Like the above
6369 signals, a healthy program receives and handles them as part of
6370 its normal operation. */
6371 signal_stop[TARGET_SIGNAL_LWP] = 0;
6372 signal_print[TARGET_SIGNAL_LWP] = 0;
6373 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6374 signal_print[TARGET_SIGNAL_WAITING] = 0;
6375 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6376 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6377
6378 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6379 &stop_on_solib_events, _("\
6380 Set stopping for shared library events."), _("\
6381 Show stopping for shared library events."), _("\
6382 If nonzero, gdb will give control to the user when the dynamic linker\n\
6383 notifies gdb of shared library events. The most common event of interest\n\
6384 to the user would be loading/unloading of a new library."),
6385 NULL,
6386 show_stop_on_solib_events,
6387 &setlist, &showlist);
6388
6389 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6390 follow_fork_mode_kind_names,
6391 &follow_fork_mode_string, _("\
6392 Set debugger response to a program call of fork or vfork."), _("\
6393 Show debugger response to a program call of fork or vfork."), _("\
6394 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6395 parent - the original process is debugged after a fork\n\
6396 child - the new process is debugged after a fork\n\
6397 The unfollowed process will continue to run.\n\
6398 By default, the debugger will follow the parent process."),
6399 NULL,
6400 show_follow_fork_mode_string,
6401 &setlist, &showlist);
6402
6403 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6404 follow_exec_mode_names,
6405 &follow_exec_mode_string, _("\
6406 Set debugger response to a program call of exec."), _("\
6407 Show debugger response to a program call of exec."), _("\
6408 An exec call replaces the program image of a process.\n\
6409 \n\
6410 follow-exec-mode can be:\n\
6411 \n\
6412 new - the debugger creates a new inferior and rebinds the process \n\
6413 to this new inferior. The program the process was running before\n\
6414 the exec call can be restarted afterwards by restarting the original\n\
6415 inferior.\n\
6416 \n\
6417 same - the debugger keeps the process bound to the same inferior.\n\
6418 The new executable image replaces the previous executable loaded in\n\
6419 the inferior. Restarting the inferior after the exec call restarts\n\
6420 the executable the process was running after the exec call.\n\
6421 \n\
6422 By default, the debugger will use the same inferior."),
6423 NULL,
6424 show_follow_exec_mode_string,
6425 &setlist, &showlist);
6426
6427 add_setshow_enum_cmd ("scheduler-locking", class_run,
6428 scheduler_enums, &scheduler_mode, _("\
6429 Set mode for locking scheduler during execution."), _("\
6430 Show mode for locking scheduler during execution."), _("\
6431 off == no locking (threads may preempt at any time)\n\
6432 on == full locking (no thread except the current thread may run)\n\
6433 step == scheduler locked during every single-step operation.\n\
6434 In this mode, no other thread may run during a step command.\n\
6435 Other threads may run while stepping over a function call ('next')."),
6436 set_schedlock_func, /* traps on target vector */
6437 show_scheduler_mode,
6438 &setlist, &showlist);
6439
6440 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6441 Set mode for resuming threads of all processes."), _("\
6442 Show mode for resuming threads of all processes."), _("\
6443 When on, execution commands (such as 'continue' or 'next') resume all\n\
6444 threads of all processes. When off (which is the default), execution\n\
6445 commands only resume the threads of the current process. The set of\n\
6446 threads that are resumed is further refined by the scheduler-locking\n\
6447 mode (see help set scheduler-locking)."),
6448 NULL,
6449 show_schedule_multiple,
6450 &setlist, &showlist);
6451
6452 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6453 Set mode of the step operation."), _("\
6454 Show mode of the step operation."), _("\
6455 When set, doing a step over a function without debug line information\n\
6456 will stop at the first instruction of that function. Otherwise, the\n\
6457 function is skipped and the step command stops at a different source line."),
6458 NULL,
6459 show_step_stop_if_no_debug,
6460 &setlist, &showlist);
6461
6462 add_setshow_enum_cmd ("displaced-stepping", class_run,
6463 can_use_displaced_stepping_enum,
6464 &can_use_displaced_stepping, _("\
6465 Set debugger's willingness to use displaced stepping."), _("\
6466 Show debugger's willingness to use displaced stepping."), _("\
6467 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6468 supported by the target architecture. If off, gdb will not use displaced\n\
6469 stepping to step over breakpoints, even if such is supported by the target\n\
6470 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6471 if the target architecture supports it and non-stop mode is active, but will not\n\
6472 use it in all-stop mode (see help set non-stop)."),
6473 NULL,
6474 show_can_use_displaced_stepping,
6475 &setlist, &showlist);
6476
6477 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6478 &exec_direction, _("Set direction of execution.\n\
6479 Options are 'forward' or 'reverse'."),
6480 _("Show direction of execution (forward/reverse)."),
6481 _("Tells gdb whether to execute forward or backward."),
6482 set_exec_direction_func, show_exec_direction_func,
6483 &setlist, &showlist);
6484
6485 /* Set/show detach-on-fork: user-settable mode. */
6486
6487 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6488 Set whether gdb will detach the child of a fork."), _("\
6489 Show whether gdb will detach the child of a fork."), _("\
6490 Tells gdb whether to detach the child of a fork."),
6491 NULL, NULL, &setlist, &showlist);
6492
6493 /* ptid initializations */
6494 null_ptid = ptid_build (0, 0, 0);
6495 minus_one_ptid = ptid_build (-1, 0, 0);
6496 inferior_ptid = null_ptid;
6497 target_last_wait_ptid = minus_one_ptid;
6498 displaced_step_ptid = null_ptid;
6499
6500 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6501 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6502 observer_attach_thread_exit (infrun_thread_thread_exit);
6503
6504 /* Explicitly create without lookup, since that tries to create a
6505 value with a void typed value, and when we get here, gdbarch
6506 isn't initialized yet. At this point, we're quite sure there
6507 isn't another convenience variable of the same name. */
6508 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6509 }
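/* For reference, a few of the user-level knobs registered above, with
   illustrative values:

     (gdb) set non-stop on
     (gdb) set scheduler-locking step
     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off
     (gdb) set exec-direction reverse
     (gdb) set displaced-stepping auto

   Each maps onto a variable wired up above (non_stop_1, scheduler_mode,
   follow_fork_mode_string, detach_fork, exec_direction,
   can_use_displaced_stepping).  */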