gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
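/* As a rough illustration of the "compare the PC against address
   ranges" approach mentioned above, such a check could look like the
   sketch below.  The struct and function names are hypothetical; the
   real per-target logic lives in the solib backends, not here.  */

struct example_ld_range
{
  CORE_ADDR start;
  CORE_ADDR end;
};

static int
example_in_dynsym_resolve_code (CORE_ADDR pc,
				const struct example_ld_range *ranges,
				int nranges)
{
  int i;

  for (i = 0; i < nranges; i++)
    if (pc >= ranges[i].start && pc < ranges[i].end)
      return 1;
  return 0;
}
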
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
180
181
182 /* Convert the #defines into values. This is temporary until wfi control
183 flow is completely sorted out. */
184
185 #ifndef CANNOT_STEP_HW_WATCHPOINTS
186 #define CANNOT_STEP_HW_WATCHPOINTS 0
187 #else
188 #undef CANNOT_STEP_HW_WATCHPOINTS
189 #define CANNOT_STEP_HW_WATCHPOINTS 1
190 #endif
191
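/* The same normalization pattern, shown with a hypothetical
   EXAMPLE_FEATURE macro: whether or not a port #defines it, it ends
   up as a plain 0/1 value that later code can test directly, as in
   "if (EXAMPLE_FEATURE && step)".  */

#ifndef EXAMPLE_FEATURE
#define EXAMPLE_FEATURE 0
#else
#undef EXAMPLE_FEATURE
#define EXAMPLE_FEATURE 1
#endif
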
192 /* Tables of how to react to signals; the user sets them. */
193
194 static unsigned char *signal_stop;
195 static unsigned char *signal_print;
196 static unsigned char *signal_program;
197
198 #define SET_SIGS(nsigs,sigs,flags) \
199 do { \
200 int signum = (nsigs); \
201 while (signum-- > 0) \
202 if ((sigs)[signum]) \
203 (flags)[signum] = 1; \
204 } while (0)
205
206 #define UNSET_SIGS(nsigs,sigs,flags) \
207 do { \
208 int signum = (nsigs); \
209 while (signum-- > 0) \
210 if ((sigs)[signum]) \
211 (flags)[signum] = 0; \
212 } while (0)
213
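/* A minimal usage sketch of the two macros above, operating on small
   local arrays rather than GDB's real signal tables; the array and
   function names below are illustrative only.  */

static void
example_sigs_usage (void)
{
  unsigned char selected[] = { 1, 0, 1 };	/* Signals the user named.  */
  unsigned char flags[] = { 0, 0, 0 };		/* A per-signal flag table.  */

  SET_SIGS (3, selected, flags);	/* flags becomes { 1, 0, 1 }.  */
  UNSET_SIGS (3, selected, flags);	/* flags is back to { 0, 0, 0 }.  */
}
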
214 /* Value to pass to target_resume() to cause all threads to resume */
215
216 #define RESUME_ALL minus_one_ptid
217
218 /* Command list pointer for the "stop" placeholder. */
219
220 static struct cmd_list_element *stop_command;
221
222 /* Function inferior was in as of last step command. */
223
224 static struct symbol *step_start_function;
225
226 /* Nonzero if we want to give control to the user when we're notified
227 of shared library events by the dynamic linker. */
228 static int stop_on_solib_events;
229 static void
230 show_stop_on_solib_events (struct ui_file *file, int from_tty,
231 struct cmd_list_element *c, const char *value)
232 {
233 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
234 value);
235 }
236
237 /* Nonzero means we are expecting a trace trap
238 and should stop the inferior and return silently when it happens. */
239
240 int stop_after_trap;
241
242 /* Save register contents here when executing a "finish" command or when
243 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
244 Thus this contains the return value from the called function (assuming
245 values are returned in a register). */
246
247 struct regcache *stop_registers;
248
249 /* Nonzero after stop if current stack frame should be printed. */
250
251 static int stop_print_frame;
252
253 /* This is a cached copy of the pid/waitstatus of the last event
254 returned by target_wait()/deprecated_target_wait_hook(). This
255 information is returned by get_last_target_status(). */
256 static ptid_t target_last_wait_ptid;
257 static struct target_waitstatus target_last_waitstatus;
258
259 static void context_switch (ptid_t ptid);
260
261 void init_thread_stepping_state (struct thread_info *tss);
262
263 void init_infwait_state (void);
264
265 static const char follow_fork_mode_child[] = "child";
266 static const char follow_fork_mode_parent[] = "parent";
267
268 static const char *follow_fork_mode_kind_names[] = {
269 follow_fork_mode_child,
270 follow_fork_mode_parent,
271 NULL
272 };
273
274 static const char *follow_fork_mode_string = follow_fork_mode_parent;
275 static void
276 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
277 struct cmd_list_element *c, const char *value)
278 {
279 fprintf_filtered (file, _("\
280 Debugger response to a program call of fork or vfork is \"%s\".\n"),
281 value);
282 }
283 \f
284
285 /* Tell the target to follow the fork we're stopped at. Returns true
286 if the inferior should be resumed; false, if the target for some
287 reason decided it's best not to resume. */
288
289 static int
290 follow_fork (void)
291 {
292 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
293 int should_resume = 1;
294 struct thread_info *tp;
295
296 /* Copy user stepping state to the new inferior thread. FIXME: the
297 followed fork child thread should have a copy of most of the
298 parent thread structure's run control related fields, not just these.
299 Initialized to avoid "may be used uninitialized" warnings from gcc. */
300 struct breakpoint *step_resume_breakpoint = NULL;
301 CORE_ADDR step_range_start = 0;
302 CORE_ADDR step_range_end = 0;
303 struct frame_id step_frame_id = { 0 };
304
305 if (!non_stop)
306 {
307 ptid_t wait_ptid;
308 struct target_waitstatus wait_status;
309
310 /* Get the last target status returned by target_wait(). */
311 get_last_target_status (&wait_ptid, &wait_status);
312
313 /* If not stopped at a fork event, then there's nothing else to
314 do. */
315 if (wait_status.kind != TARGET_WAITKIND_FORKED
316 && wait_status.kind != TARGET_WAITKIND_VFORKED)
317 return 1;
318
319 /* Check if we switched over from WAIT_PTID, since the event was
320 reported. */
321 if (!ptid_equal (wait_ptid, minus_one_ptid)
322 && !ptid_equal (inferior_ptid, wait_ptid))
323 {
324 /* We did. Switch back to WAIT_PTID thread, to tell the
325 target to follow it (in either direction). We'll
326 afterwards refuse to resume, and inform the user what
327 happened. */
328 switch_to_thread (wait_ptid);
329 should_resume = 0;
330 }
331 }
332
333 tp = inferior_thread ();
334
335 /* If there were any forks/vforks that were caught and are now to be
336 followed, then do so now. */
337 switch (tp->pending_follow.kind)
338 {
339 case TARGET_WAITKIND_FORKED:
340 case TARGET_WAITKIND_VFORKED:
341 {
342 ptid_t parent, child;
343
344 /* If the user did a next/step, etc, over a fork call,
345 preserve the stepping state in the fork child. */
346 if (follow_child && should_resume)
347 {
348 step_resume_breakpoint
349 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
350 step_range_start = tp->step_range_start;
351 step_range_end = tp->step_range_end;
352 step_frame_id = tp->step_frame_id;
353
354 /* For now, delete the parent's sr breakpoint, otherwise,
355 parent/child sr breakpoints are considered duplicates,
356 and the child version will not be installed. Remove
357 this when the breakpoints module becomes aware of
358 inferiors and address spaces. */
359 delete_step_resume_breakpoint (tp);
360 tp->step_range_start = 0;
361 tp->step_range_end = 0;
362 tp->step_frame_id = null_frame_id;
363 }
364
365 parent = inferior_ptid;
366 child = tp->pending_follow.value.related_pid;
367
368 /* Tell the target to do whatever is necessary to follow
369 either parent or child. */
370 if (target_follow_fork (follow_child))
371 {
372 /* Target refused to follow, or there's some other reason
373 we shouldn't resume. */
374 should_resume = 0;
375 }
376 else
377 {
378 /* This pending follow fork event is now handled, one way
379 or another. The previously selected thread may be gone
380 from the lists by now, but if it is still around, we need
381 to clear the pending follow request. */
382 tp = find_thread_ptid (parent);
383 if (tp)
384 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
385
386 /* This makes sure we don't try to apply the "Switched
387 over from WAIT_PID" logic above. */
388 nullify_last_target_wait_ptid ();
389
390 /* If we followed the child, switch to it... */
391 if (follow_child)
392 {
393 switch_to_thread (child);
394
395 /* ... and preserve the stepping state, in case the
396 user was stepping over the fork call. */
397 if (should_resume)
398 {
399 tp = inferior_thread ();
400 tp->step_resume_breakpoint = step_resume_breakpoint;
401 tp->step_range_start = step_range_start;
402 tp->step_range_end = step_range_end;
403 tp->step_frame_id = step_frame_id;
404 }
405 else
406 {
407 /* If we get here, it was because we're trying to
408 resume from a fork catchpoint, but the user
409 has switched threads away from the thread that
410 forked. In that case, the resume command
411 issued is most likely not applicable to the
412 child, so just warn, and refuse to resume. */
413 warning (_("\
414 Not resuming: switched threads before following fork child.\n"));
415 }
416
417 /* Reset breakpoints in the child as appropriate. */
418 follow_inferior_reset_breakpoints ();
419 }
420 else
421 switch_to_thread (parent);
422 }
423 }
424 break;
425 case TARGET_WAITKIND_SPURIOUS:
426 /* Nothing to follow. */
427 break;
428 default:
429 internal_error (__FILE__, __LINE__,
430 "Unexpected pending_follow.kind %d\n",
431 tp->pending_follow.kind);
432 break;
433 }
434
435 return should_resume;
436 }
437
438 void
439 follow_inferior_reset_breakpoints (void)
440 {
441 struct thread_info *tp = inferior_thread ();
442
443 /* Was there a step_resume breakpoint? (There was if the user
444 did a "next" at the fork() call.) If so, explicitly reset its
445 thread number.
446
447 step_resumes are a form of bp that are made to be per-thread.
448 Since we created the step_resume bp when the parent process
449 was being debugged, and now are switching to the child process,
450 from the breakpoint package's viewpoint, that's a switch of
451 "threads". We must update the bp's notion of which thread
452 it is for, or it'll be ignored when it triggers. */
453
454 if (tp->step_resume_breakpoint)
455 breakpoint_re_set_thread (tp->step_resume_breakpoint);
456
457 /* Reinsert all breakpoints in the child. The user may have set
458 breakpoints after catching the fork, in which case those
459 were never set in the child, but only in the parent. This makes
460 sure the inserted breakpoints match the breakpoint list. */
461
462 breakpoint_re_set ();
463 insert_breakpoints ();
464 }
465
466 /* The child has exited or execed: resume threads of the parent the
467 user wanted to be executing. */
468
469 static int
470 proceed_after_vfork_done (struct thread_info *thread,
471 void *arg)
472 {
473 int pid = * (int *) arg;
474
475 if (ptid_get_pid (thread->ptid) == pid
476 && is_running (thread->ptid)
477 && !is_executing (thread->ptid)
478 && !thread->stop_requested
479 && thread->stop_signal == TARGET_SIGNAL_0)
480 {
481 if (debug_infrun)
482 fprintf_unfiltered (gdb_stdlog,
483 "infrun: resuming vfork parent thread %s\n",
484 target_pid_to_str (thread->ptid));
485
486 switch_to_thread (thread->ptid);
487 clear_proceed_status ();
488 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
489 }
490
491 return 0;
492 }
493
494 /* Called whenever we notice an exec or exit event, to handle
495 detaching or resuming a vfork parent. */
496
497 static void
498 handle_vfork_child_exec_or_exit (int exec)
499 {
500 struct inferior *inf = current_inferior ();
501
502 if (inf->vfork_parent)
503 {
504 int resume_parent = -1;
505
506 /* This exec or exit marks the end of the shared memory region
507 between the parent and the child. If the user wanted to
508 detach from the parent, now is the time. */
509
510 if (inf->vfork_parent->pending_detach)
511 {
512 struct thread_info *tp;
513 struct cleanup *old_chain;
514 struct program_space *pspace;
515 struct address_space *aspace;
516
517 /* follow-fork child, detach-on-fork on */
518
519 old_chain = make_cleanup_restore_current_thread ();
520
521 /* We're letting go of the parent. */
522 tp = any_live_thread_of_process (inf->vfork_parent->pid);
523 switch_to_thread (tp->ptid);
524
525 /* We're about to detach from the parent, which implicitly
526 removes breakpoints from its address space. There's a
527 catch here: we want to reuse the spaces for the child,
528 but, parent/child are still sharing the pspace at this
529 point, although the exec in reality makes the kernel give
530 the child a fresh set of new pages. The problem here is
531 that the breakpoints module, being unaware of this, would
532 likely choose the child process to write to the parent
533 address space. Swapping the child temporarily away from
534 the spaces has the desired effect. Yes, this is "sort
535 of" a hack. */
536
537 pspace = inf->pspace;
538 aspace = inf->aspace;
539 inf->aspace = NULL;
540 inf->pspace = NULL;
541
542 if (debug_infrun || info_verbose)
543 {
544 target_terminal_ours ();
545
546 if (exec)
547 fprintf_filtered (gdb_stdlog,
548 "Detaching vfork parent process %d after child exec.\n",
549 inf->vfork_parent->pid);
550 else
551 fprintf_filtered (gdb_stdlog,
552 "Detaching vfork parent process %d after child exit.\n",
553 inf->vfork_parent->pid);
554 }
555
556 target_detach (NULL, 0);
557
558 /* Put it back. */
559 inf->pspace = pspace;
560 inf->aspace = aspace;
561
562 do_cleanups (old_chain);
563 }
564 else if (exec)
565 {
566 /* We're staying attached to the parent, so, really give the
567 child a new address space. */
568 inf->pspace = add_program_space (maybe_new_address_space ());
569 inf->aspace = inf->pspace->aspace;
570 inf->removable = 1;
571 set_current_program_space (inf->pspace);
572
573 resume_parent = inf->vfork_parent->pid;
574
575 /* Break the bonds. */
576 inf->vfork_parent->vfork_child = NULL;
577 }
578 else
579 {
580 struct cleanup *old_chain;
581 struct program_space *pspace;
582
583 /* If this is a vfork child exiting, then the pspace and
584 aspaces were shared with the parent. Since we're
585 reporting the process exit, we'll be mourning all that is
586 found in the address space, and switching to null_ptid,
587 preparing to start a new inferior. But, since we don't
588 want to clobber the parent's address/program spaces, we
589 go ahead and create a new one for this exiting
590 inferior. */
591
592 /* Switch to null_ptid, so that clone_program_space doesn't want
593 to read the selected frame of a dead process. */
594 old_chain = save_inferior_ptid ();
595 inferior_ptid = null_ptid;
596
597 /* This inferior is dead, so avoid giving the breakpoints
598 module the option to write through to it (cloning a
599 program space resets breakpoints). */
600 inf->aspace = NULL;
601 inf->pspace = NULL;
602 pspace = add_program_space (maybe_new_address_space ());
603 set_current_program_space (pspace);
604 inf->removable = 1;
605 clone_program_space (pspace, inf->vfork_parent->pspace);
606 inf->pspace = pspace;
607 inf->aspace = pspace->aspace;
608
609 /* Put back inferior_ptid. We'll continue mourning this
610 inferior. */
611 do_cleanups (old_chain);
612
613 resume_parent = inf->vfork_parent->pid;
614 /* Break the bonds. */
615 inf->vfork_parent->vfork_child = NULL;
616 }
617
618 inf->vfork_parent = NULL;
619
620 gdb_assert (current_program_space == inf->pspace);
621
622 if (non_stop && resume_parent != -1)
623 {
624 /* If the user wanted the parent to be running, let it go
625 free now. */
626 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
627
628 if (debug_infrun)
629 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
630 resume_parent);
631
632 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
633
634 do_cleanups (old_chain);
635 }
636 }
637 }
638
639 /* Enum strings for "set|show displaced-stepping". */
640
641 static const char follow_exec_mode_new[] = "new";
642 static const char follow_exec_mode_same[] = "same";
643 static const char *follow_exec_mode_names[] =
644 {
645 follow_exec_mode_new,
646 follow_exec_mode_same,
647 NULL,
648 };
649
650 static const char *follow_exec_mode_string = follow_exec_mode_same;
651 static void
652 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
653 struct cmd_list_element *c, const char *value)
654 {
655 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
656 }
657
658 /* EXECD_PATHNAME is assumed to be non-NULL. */
659
660 static void
661 follow_exec (ptid_t pid, char *execd_pathname)
662 {
663 struct target_ops *tgt;
664 struct thread_info *th = inferior_thread ();
665 struct inferior *inf = current_inferior ();
666
667 /* This is an exec event that we actually wish to pay attention to.
668 Refresh our symbol table to the newly exec'd program, remove any
669 momentary bp's, etc.
670
671 If there are breakpoints, they aren't really inserted now,
672 since the exec() transformed our inferior into a fresh set
673 of instructions.
674
675 We want to preserve symbolic breakpoints on the list, since
676 we have hopes that they can be reset after the new a.out's
677 symbol table is read.
678
679 However, any "raw" breakpoints must be removed from the list
680 (e.g., the solib bp's), since their address is probably invalid
681 now.
682
683 And, we DON'T want to call delete_breakpoints() here, since
684 that may write the bp's "shadow contents" (the instruction
685 value that was overwritten with a TRAP instruction). Since
686 we now have a new a.out, those shadow contents aren't valid. */
687
688 mark_breakpoints_out ();
689
690 update_breakpoints_after_exec ();
691
692 /* If there was one, it's gone now. We cannot truly step-to-next
693 statement through an exec(). */
694 th->step_resume_breakpoint = NULL;
695 th->step_range_start = 0;
696 th->step_range_end = 0;
697
698 /* The target reports the exec event to the main thread, even if
699 some other thread does the exec, and even if the main thread was
700 already stopped --- if debugging in non-stop mode, it's possible
701 the user had the main thread held stopped in the previous image
702 --- release it now. This is the same behavior as step-over-exec
703 with scheduler-locking on in all-stop mode. */
704 th->stop_requested = 0;
705
706 /* What is this a.out's name? */
707 printf_unfiltered (_("%s is executing new program: %s\n"),
708 target_pid_to_str (inferior_ptid),
709 execd_pathname);
710
711 /* We've followed the inferior through an exec. Therefore, the
712 inferior has essentially been killed & reborn. */
713
714 gdb_flush (gdb_stdout);
715
716 breakpoint_init_inferior (inf_execd);
717
718 if (gdb_sysroot && *gdb_sysroot)
719 {
720 char *name = alloca (strlen (gdb_sysroot)
721 + strlen (execd_pathname)
722 + 1);
723 strcpy (name, gdb_sysroot);
724 strcat (name, execd_pathname);
725 execd_pathname = name;
726 }
727
728 /* Reset the shared library package. This ensures that we get a
729 shlib event when the child reaches "_start", at which point the
730 dld will have had a chance to initialize the child. */
731 /* Also, loading a symbol file below may trigger symbol lookups, and
732 we don't want those to be satisfied by the libraries of the
733 previous incarnation of this process. */
734 no_shared_libraries (NULL, 0);
735
736 if (follow_exec_mode_string == follow_exec_mode_new)
737 {
738 struct program_space *pspace;
739 struct inferior *new_inf;
740
741 /* The user wants to keep the old inferior and program spaces
742 around. Create a new fresh one, and switch to it. */
743
744 inf = add_inferior (current_inferior ()->pid);
745 pspace = add_program_space (maybe_new_address_space ());
746 inf->pspace = pspace;
747 inf->aspace = pspace->aspace;
748
749 exit_inferior_num_silent (current_inferior ()->num);
750
751 set_current_inferior (inf);
752 set_current_program_space (pspace);
753 }
754
755 gdb_assert (current_program_space == inf->pspace);
756
757 /* That a.out is now the one to use. */
758 exec_file_attach (execd_pathname, 0);
759
760 /* Load the main file's symbols. */
761 symbol_file_add_main (execd_pathname, 0);
762
763 #ifdef SOLIB_CREATE_INFERIOR_HOOK
764 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
765 #else
766 solib_create_inferior_hook (0);
767 #endif
768
769 jit_inferior_created_hook ();
770
771 /* Reinsert all breakpoints. (Those which were symbolic have
772 been reset to the proper address in the new a.out, thanks
773 to symbol_file_command...) */
774 insert_breakpoints ();
775
776 /* The next resume of this inferior should bring it to the shlib
777 startup breakpoints. (If the user had also set bp's on
778 "main" from the old (parent) process, then they'll auto-
779 matically get reset there in the new process.) */
780 }
781
782 /* Non-zero if we are just simulating a single-step. This is needed
783 because we cannot remove the breakpoints in the inferior process
784 until after the `wait' in `wait_for_inferior'. */
785 static int singlestep_breakpoints_inserted_p = 0;
786
787 /* The thread we inserted single-step breakpoints for. */
788 static ptid_t singlestep_ptid;
789
790 /* PC when we started this single-step. */
791 static CORE_ADDR singlestep_pc;
792
793 /* If another thread hit the singlestep breakpoint, we save the original
794 thread here so that we can resume single-stepping it later. */
795 static ptid_t saved_singlestep_ptid;
796 static int stepping_past_singlestep_breakpoint;
797
798 /* If not equal to null_ptid, this means that after stepping over the breakpoint
799 is finished, we need to switch to deferred_step_ptid and step it.
800
801 The use case is when one thread has hit a breakpoint, and then the user
802 has switched to another thread and issued 'step'. We need to step over
803 the breakpoint in the thread which hit it, but then continue stepping
804 the thread the user has selected. */
805 static ptid_t deferred_step_ptid;
806 \f
807 /* Displaced stepping. */
808
809 /* In non-stop debugging mode, we must take special care to manage
810 breakpoints properly; in particular, the traditional strategy for
811 stepping a thread past a breakpoint it has hit is unsuitable.
812 'Displaced stepping' is a tactic for stepping one thread past a
813 breakpoint it has hit while ensuring that other threads running
814 concurrently will hit the breakpoint as they should.
815
816 The traditional way to step a thread T off a breakpoint in a
817 multi-threaded program in all-stop mode is as follows:
818
819 a0) Initially, all threads are stopped, and breakpoints are not
820 inserted.
821 a1) We single-step T, leaving breakpoints uninserted.
822 a2) We insert breakpoints, and resume all threads.
823
824 In non-stop debugging, however, this strategy is unsuitable: we
825 don't want to have to stop all threads in the system in order to
826 continue or step T past a breakpoint. Instead, we use displaced
827 stepping:
828
829 n0) Initially, T is stopped, other threads are running, and
830 breakpoints are inserted.
831 n1) We copy the instruction "under" the breakpoint to a separate
832 location, outside the main code stream, making any adjustments
833 to the instruction, register, and memory state as directed by
834 T's architecture.
835 n2) We single-step T over the instruction at its new location.
836 n3) We adjust the resulting register and memory state as directed
837 by T's architecture. This includes resetting T's PC to point
838 back into the main instruction stream.
839 n4) We resume T.
840
841 This approach depends on the following gdbarch methods:
842
843 - gdbarch_max_insn_length and gdbarch_displaced_step_location
844 indicate where to copy the instruction, and how much space must
845 be reserved there. We use these in step n1.
846
847 - gdbarch_displaced_step_copy_insn copies an instruction to a new
848 address, and makes any necessary adjustments to the instruction,
849 register contents, and memory. We use this in step n1.
850
851 - gdbarch_displaced_step_fixup adjusts registers and memory after
852 we have successfully single-stepped the instruction, to yield the
853 same effect the instruction would have had if we had executed it
854 at its original address. We use this in step n3.
855
856 - gdbarch_displaced_step_free_closure provides cleanup.
857
858 The gdbarch_displaced_step_copy_insn and
859 gdbarch_displaced_step_fixup functions must be written so that
860 copying an instruction with gdbarch_displaced_step_copy_insn,
861 single-stepping across the copied instruction, and then applying
862 gdbarch_displaced_step_fixup should have the same effects on the
863 thread's memory and registers as stepping the instruction in place
864 would have. Exactly which responsibilities fall to the copy and
865 which fall to the fixup is up to the author of those functions.
866
867 See the comments in gdbarch.sh for details.
868
869 Note that displaced stepping and software single-step cannot
870 currently be used in combination, although with some care I think
871 they could be made to. Software single-step works by placing
872 breakpoints on all possible subsequent instructions; if the
873 displaced instruction is a PC-relative jump, those breakpoints
874 could fall in very strange places --- on pages that aren't
875 executable, or at addresses that are not proper instruction
876 boundaries. (We do generally let other threads run while we wait
877 to hit the software single-step breakpoint, and they might
878 encounter such a corrupted instruction.) One way to work around
879 this would be to have gdbarch_displaced_step_copy_insn fully
880 simulate the effect of PC-relative instructions (and return NULL)
881 on architectures that use software single-stepping.
882
883 In non-stop mode, we can have independent and simultaneous step
884 requests, so more than one thread may need to simultaneously step
885 over a breakpoint. The current implementation assumes there is
886 only one scratch space per process. In this case, we have to
887 serialize access to the scratch space. If thread A wants to step
888 over a breakpoint, but we are currently waiting for some other
889 thread to complete a displaced step, we leave thread A stopped and
890 place it in the displaced_step_request_queue. Whenever a displaced
891 step finishes, we pick the next thread in the queue and start a new
892 displaced step operation on it. See displaced_step_prepare and
893 displaced_step_fixup for details. */
894
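/* A toy sketch of steps n1-n4 above, acting on a made-up machine
   state instead of a real target: copy the instruction "under" the
   breakpoint to a scratch location, step it there, then translate the
   resulting PC back into the main instruction stream.  All names here
   are illustrative; the real work is done by the gdbarch methods and
   by displaced_step_prepare/displaced_step_fixup below.  */

struct example_machine
{
  CORE_ADDR pc;
  gdb_byte mem[64];
};

static void
example_displaced_step (struct example_machine *m,
			CORE_ADDR breakpoint_pc, CORE_ADDR scratch)
{
  /* n1: copy the displaced instruction (a single byte on this toy
     machine) to the scratch area, saving what was there.  */
  gdb_byte saved = m->mem[scratch];

  m->mem[scratch] = m->mem[breakpoint_pc];

  /* n2: single-step the copy.  */
  m->pc = scratch;
  m->pc++;			/* Pretend the target stepped once.  */

  /* n3: fix up the state -- relocate the PC back into the main
     stream and restore the scratch area.  */
  m->pc = breakpoint_pc + (m->pc - scratch);
  m->mem[scratch] = saved;

  /* n4: the thread would now be resumed at M->PC.  */
}
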
895 struct displaced_step_request
896 {
897 ptid_t ptid;
898 struct displaced_step_request *next;
899 };
900
901 /* Per-inferior displaced stepping state. */
902 struct displaced_step_inferior_state
903 {
904 /* Pointer to next in linked list. */
905 struct displaced_step_inferior_state *next;
906
907 /* The process this displaced step state refers to. */
908 int pid;
909
910 /* A queue of pending displaced stepping requests. One entry per
911 thread that needs to do a displaced step. */
912 struct displaced_step_request *step_request_queue;
913
914 /* If this is not null_ptid, this is the thread carrying out a
915 displaced single-step in process PID. This thread's state will
916 require fixing up once it has completed its step. */
917 ptid_t step_ptid;
918
919 /* The architecture the thread had when we stepped it. */
920 struct gdbarch *step_gdbarch;
921
922 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
923 for post-step cleanup. */
924 struct displaced_step_closure *step_closure;
925
926 /* The address of the original instruction, and the copy we
927 made. */
928 CORE_ADDR step_original, step_copy;
929
930 /* Saved contents of copy area. */
931 gdb_byte *step_saved_copy;
932 };
933
934 /* The list of states of processes involved in displaced stepping
935 presently. */
936 static struct displaced_step_inferior_state *displaced_step_inferior_states;
937
938 /* Get the displaced stepping state of process PID. */
939
940 static struct displaced_step_inferior_state *
941 get_displaced_stepping_state (int pid)
942 {
943 struct displaced_step_inferior_state *state;
944
945 for (state = displaced_step_inferior_states;
946 state != NULL;
947 state = state->next)
948 if (state->pid == pid)
949 return state;
950
951 return NULL;
952 }
953
954 /* Add a new displaced stepping state for process PID to the displaced
955 stepping state list, or return a pointer to the existing
956 entry if one already exists. Never returns NULL. */
957
958 static struct displaced_step_inferior_state *
959 add_displaced_stepping_state (int pid)
960 {
961 struct displaced_step_inferior_state *state;
962
963 for (state = displaced_step_inferior_states;
964 state != NULL;
965 state = state->next)
966 if (state->pid == pid)
967 return state;
968
969 state = xcalloc (1, sizeof (*state));
970 state->pid = pid;
971 state->next = displaced_step_inferior_states;
972 displaced_step_inferior_states = state;
973
974 return state;
975 }
976
977 /* Remove the displaced stepping state of process PID. */
978
979 static void
980 remove_displaced_stepping_state (int pid)
981 {
982 struct displaced_step_inferior_state *it, **prev_next_p;
983
984 gdb_assert (pid != 0);
985
986 it = displaced_step_inferior_states;
987 prev_next_p = &displaced_step_inferior_states;
988 while (it)
989 {
990 if (it->pid == pid)
991 {
992 *prev_next_p = it->next;
993 xfree (it);
994 return;
995 }
996
997 prev_next_p = &it->next;
998 it = *prev_next_p;
999 }
1000 }
1001
1002 static void
1003 infrun_inferior_exit (struct inferior *inf)
1004 {
1005 remove_displaced_stepping_state (inf->pid);
1006 }
1007
1008 /* Enum strings for "set|show displaced-stepping". */
1009
1010 static const char can_use_displaced_stepping_auto[] = "auto";
1011 static const char can_use_displaced_stepping_on[] = "on";
1012 static const char can_use_displaced_stepping_off[] = "off";
1013 static const char *can_use_displaced_stepping_enum[] =
1014 {
1015 can_use_displaced_stepping_auto,
1016 can_use_displaced_stepping_on,
1017 can_use_displaced_stepping_off,
1018 NULL,
1019 };
1020
1021 /* If ON, and the architecture supports it, GDB will use displaced
1022 stepping to step over breakpoints. If OFF, or if the architecture
1023 doesn't support it, GDB will instead use the traditional
1024 hold-and-step approach. If AUTO (which is the default), GDB will
1025 decide which technique to use to step over breakpoints depending on
1026 which of all-stop or non-stop mode is active --- displaced stepping
1027 in non-stop mode; hold-and-step in all-stop mode. */
1028
1029 static const char *can_use_displaced_stepping =
1030 can_use_displaced_stepping_auto;
1031
1032 static void
1033 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1034 struct cmd_list_element *c,
1035 const char *value)
1036 {
1037 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1038 fprintf_filtered (file, _("\
1039 Debugger's willingness to use displaced stepping to step over \
1040 breakpoints is %s (currently %s).\n"),
1041 value, non_stop ? "on" : "off");
1042 else
1043 fprintf_filtered (file, _("\
1044 Debugger's willingness to use displaced stepping to step over \
1045 breakpoints is %s.\n"), value);
1046 }
1047
1048 /* Return non-zero if displaced stepping can/should be used to step
1049 over breakpoints. */
1050
1051 static int
1052 use_displaced_stepping (struct gdbarch *gdbarch)
1053 {
1054 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1055 && non_stop)
1056 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1057 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1058 && !RECORD_IS_USED);
1059 }
1060
1061 /* Clean out any stray displaced stepping state. */
1062 static void
1063 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1064 {
1065 /* Indicate that there is no cleanup pending. */
1066 displaced->step_ptid = null_ptid;
1067
1068 if (displaced->step_closure)
1069 {
1070 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1071 displaced->step_closure);
1072 displaced->step_closure = NULL;
1073 }
1074 }
1075
1076 static void
1077 displaced_step_clear_cleanup (void *arg)
1078 {
1079 struct displaced_step_inferior_state *state = arg;
1080
1081 displaced_step_clear (state);
1082 }
1083
1084 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1085 void
1086 displaced_step_dump_bytes (struct ui_file *file,
1087 const gdb_byte *buf,
1088 size_t len)
1089 {
1090 int i;
1091
1092 for (i = 0; i < len; i++)
1093 fprintf_unfiltered (file, "%02x ", buf[i]);
1094 fputs_unfiltered ("\n", file);
1095 }
1096
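/* For instance, a hypothetical caller dumping two x86 opcodes to the
   debug log would produce the line "90 cc ".  */

static void
example_dump_bytes_usage (void)
{
  static const gdb_byte insn[] = { 0x90, 0xcc };	/* NOP, INT3.  */

  displaced_step_dump_bytes (gdb_stdlog, insn, sizeof (insn));
}
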
1097 /* Prepare to single-step, using displaced stepping.
1098
1099 Note that we cannot use displaced stepping when we have a signal to
1100 deliver. If we have a signal to deliver and an instruction to step
1101 over, then after the step, there will be no indication from the
1102 target whether the thread entered a signal handler or ignored the
1103 signal and stepped over the instruction successfully --- both cases
1104 result in a simple SIGTRAP. In the first case we mustn't do a
1105 fixup, and in the second case we must --- but we can't tell which.
1106 Comments in the code for 'random signals' in handle_inferior_event
1107 explain how we handle this case instead.
1108
1109 Returns 1 if preparing was successful -- this thread is going to be
1110 stepped now; or 0 if displaced stepping this thread got queued. */
1111 static int
1112 displaced_step_prepare (ptid_t ptid)
1113 {
1114 struct cleanup *old_cleanups, *ignore_cleanups;
1115 struct regcache *regcache = get_thread_regcache (ptid);
1116 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1117 CORE_ADDR original, copy;
1118 ULONGEST len;
1119 struct displaced_step_closure *closure;
1120 struct displaced_step_inferior_state *displaced;
1121
1122 /* We should never reach this function if the architecture does not
1123 support displaced stepping. */
1124 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1125
1126 /* We have to displaced step one thread at a time, as we only have
1127 access to a single scratch space per inferior. */
1128
1129 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1130
1131 if (!ptid_equal (displaced->step_ptid, null_ptid))
1132 {
1133 /* Already waiting for a displaced step to finish. Defer this
1134 request and place it in the queue. */
1135 struct displaced_step_request *req, *new_req;
1136
1137 if (debug_displaced)
1138 fprintf_unfiltered (gdb_stdlog,
1139 "displaced: defering step of %s\n",
1140 target_pid_to_str (ptid));
1141
1142 new_req = xmalloc (sizeof (*new_req));
1143 new_req->ptid = ptid;
1144 new_req->next = NULL;
1145
1146 if (displaced->step_request_queue)
1147 {
1148 for (req = displaced->step_request_queue;
1149 req && req->next;
1150 req = req->next)
1151 ;
1152 req->next = new_req;
1153 }
1154 else
1155 displaced->step_request_queue = new_req;
1156
1157 return 0;
1158 }
1159 else
1160 {
1161 if (debug_displaced)
1162 fprintf_unfiltered (gdb_stdlog,
1163 "displaced: stepping %s now\n",
1164 target_pid_to_str (ptid));
1165 }
1166
1167 displaced_step_clear (displaced);
1168
1169 old_cleanups = save_inferior_ptid ();
1170 inferior_ptid = ptid;
1171
1172 original = regcache_read_pc (regcache);
1173
1174 copy = gdbarch_displaced_step_location (gdbarch);
1175 len = gdbarch_max_insn_length (gdbarch);
1176
1177 /* Save the original contents of the copy area. */
1178 displaced->step_saved_copy = xmalloc (len);
1179 ignore_cleanups = make_cleanup (free_current_contents,
1180 &displaced->step_saved_copy);
1181 read_memory (copy, displaced->step_saved_copy, len);
1182 if (debug_displaced)
1183 {
1184 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1185 paddress (gdbarch, copy));
1186 displaced_step_dump_bytes (gdb_stdlog,
1187 displaced->step_saved_copy,
1188 len);
1189 }
1190
1191 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1192 original, copy, regcache);
1193
1194 /* We don't support the fully-simulated case at present. */
1195 gdb_assert (closure);
1196
1197 /* Save the information we need to fix things up if the step
1198 succeeds. */
1199 displaced->step_ptid = ptid;
1200 displaced->step_gdbarch = gdbarch;
1201 displaced->step_closure = closure;
1202 displaced->step_original = original;
1203 displaced->step_copy = copy;
1204
1205 make_cleanup (displaced_step_clear_cleanup, displaced);
1206
1207 /* Resume execution at the copy. */
1208 regcache_write_pc (regcache, copy);
1209
1210 discard_cleanups (ignore_cleanups);
1211
1212 do_cleanups (old_cleanups);
1213
1214 if (debug_displaced)
1215 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1216 paddress (gdbarch, copy));
1217
1218 return 1;
1219 }
1220
1221 static void
1222 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1223 {
1224 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1225 inferior_ptid = ptid;
1226 write_memory (memaddr, myaddr, len);
1227 do_cleanups (ptid_cleanup);
1228 }
1229
1230 static void
1231 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1232 {
1233 struct cleanup *old_cleanups;
1234 struct displaced_step_inferior_state *displaced
1235 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1236
1237 /* Was any thread of this process doing a displaced step? */
1238 if (displaced == NULL)
1239 return;
1240
1241 /* Was this event for the pid we displaced? */
1242 if (ptid_equal (displaced->step_ptid, null_ptid)
1243 || ! ptid_equal (displaced->step_ptid, event_ptid))
1244 return;
1245
1246 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1247
1248 /* Restore the contents of the copy area. */
1249 {
1250 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1251 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1252 displaced->step_saved_copy, len);
1253 if (debug_displaced)
1254 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1255 paddress (displaced->step_gdbarch,
1256 displaced->step_copy));
1257 }
1258
1259 /* Did the instruction complete successfully? */
1260 if (signal == TARGET_SIGNAL_TRAP)
1261 {
1262 /* Fix up the resulting state. */
1263 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1264 displaced->step_closure,
1265 displaced->step_original,
1266 displaced->step_copy,
1267 get_thread_regcache (displaced->step_ptid));
1268 }
1269 else
1270 {
1271 /* Since the instruction didn't complete, all we can do is
1272 relocate the PC. */
1273 struct regcache *regcache = get_thread_regcache (event_ptid);
1274 CORE_ADDR pc = regcache_read_pc (regcache);
1275 pc = displaced->step_original + (pc - displaced->step_copy);
1276 regcache_write_pc (regcache, pc);
1277 }
1278
1279 do_cleanups (old_cleanups);
1280
1281 displaced->step_ptid = null_ptid;
1282
1283 /* Are there any pending displaced stepping requests? If so, run
1284 one now. Leave the state object around, since we're likely to
1285 need it again soon. */
1286 while (displaced->step_request_queue)
1287 {
1288 struct displaced_step_request *head;
1289 ptid_t ptid;
1290 struct regcache *regcache;
1291 struct gdbarch *gdbarch;
1292 CORE_ADDR actual_pc;
1293 struct address_space *aspace;
1294
1295 head = displaced->step_request_queue;
1296 ptid = head->ptid;
1297 displaced->step_request_queue = head->next;
1298 xfree (head);
1299
1300 context_switch (ptid);
1301
1302 regcache = get_thread_regcache (ptid);
1303 actual_pc = regcache_read_pc (regcache);
1304 aspace = get_regcache_aspace (regcache);
1305
1306 if (breakpoint_here_p (aspace, actual_pc))
1307 {
1308 if (debug_displaced)
1309 fprintf_unfiltered (gdb_stdlog,
1310 "displaced: stepping queued %s now\n",
1311 target_pid_to_str (ptid));
1312
1313 displaced_step_prepare (ptid);
1314
1315 gdbarch = get_regcache_arch (regcache);
1316
1317 if (debug_displaced)
1318 {
1319 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1320 gdb_byte buf[4];
1321
1322 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1323 paddress (gdbarch, actual_pc));
1324 read_memory (actual_pc, buf, sizeof (buf));
1325 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1326 }
1327
1328 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1329 displaced->step_closure))
1330 target_resume (ptid, 1, TARGET_SIGNAL_0);
1331 else
1332 target_resume (ptid, 0, TARGET_SIGNAL_0);
1333
1334 /* Done, we're stepping a thread. */
1335 break;
1336 }
1337 else
1338 {
1339 int step;
1340 struct thread_info *tp = inferior_thread ();
1341
1342 /* The breakpoint we were sitting under has since been
1343 removed. */
1344 tp->trap_expected = 0;
1345
1346 /* Go back to what we were trying to do. */
1347 step = currently_stepping (tp);
1348
1349 if (debug_displaced)
1350 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1351 target_pid_to_str (tp->ptid), step);
1352
1353 target_resume (ptid, step, TARGET_SIGNAL_0);
1354 tp->stop_signal = TARGET_SIGNAL_0;
1355
1356 /* This request was discarded. See if there's any other
1357 thread waiting for its turn. */
1358 }
1359 }
1360 }
1361
1362 /* Update global variables holding ptids to hold NEW_PTID if they were
1363 holding OLD_PTID. */
1364 static void
1365 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1366 {
1367 struct displaced_step_request *it;
1368 struct displaced_step_inferior_state *displaced;
1369
1370 if (ptid_equal (inferior_ptid, old_ptid))
1371 inferior_ptid = new_ptid;
1372
1373 if (ptid_equal (singlestep_ptid, old_ptid))
1374 singlestep_ptid = new_ptid;
1375
1376 if (ptid_equal (deferred_step_ptid, old_ptid))
1377 deferred_step_ptid = new_ptid;
1378
1379 for (displaced = displaced_step_inferior_states;
1380 displaced;
1381 displaced = displaced->next)
1382 {
1383 if (ptid_equal (displaced->step_ptid, old_ptid))
1384 displaced->step_ptid = new_ptid;
1385
1386 for (it = displaced->step_request_queue; it; it = it->next)
1387 if (ptid_equal (it->ptid, old_ptid))
1388 it->ptid = new_ptid;
1389 }
1390 }
1391
1392 \f
1393 /* Resuming. */
1394
1395 /* Things to clean up if we QUIT out of resume (). */
1396 static void
1397 resume_cleanups (void *ignore)
1398 {
1399 normal_stop ();
1400 }
1401
1402 static const char schedlock_off[] = "off";
1403 static const char schedlock_on[] = "on";
1404 static const char schedlock_step[] = "step";
1405 static const char *scheduler_enums[] = {
1406 schedlock_off,
1407 schedlock_on,
1408 schedlock_step,
1409 NULL
1410 };
1411 static const char *scheduler_mode = schedlock_off;
1412 static void
1413 show_scheduler_mode (struct ui_file *file, int from_tty,
1414 struct cmd_list_element *c, const char *value)
1415 {
1416 fprintf_filtered (file, _("\
1417 Mode for locking scheduler during execution is \"%s\".\n"),
1418 value);
1419 }
1420
1421 static void
1422 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1423 {
1424 if (!target_can_lock_scheduler)
1425 {
1426 scheduler_mode = schedlock_off;
1427 error (_("Target '%s' cannot support this command."), target_shortname);
1428 }
1429 }
1430
1431 /* True if execution commands resume all threads of all processes by
1432 default; otherwise, resume only threads of the current inferior
1433 process. */
1434 int sched_multi = 0;
1435
1436 /* Try to set up software single-stepping over the specified location.
1437 Return 1 if target_resume() should use hardware single step.
1438
1439 GDBARCH the current gdbarch.
1440 PC the location to step over. */
1441
1442 static int
1443 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1444 {
1445 int hw_step = 1;
1446
1447 if (gdbarch_software_single_step_p (gdbarch)
1448 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1449 {
1450 hw_step = 0;
1451 /* Do not pull these breakpoints until after a `wait' in
1452 `wait_for_inferior' */
1453 singlestep_breakpoints_inserted_p = 1;
1454 singlestep_ptid = inferior_ptid;
1455 singlestep_pc = pc;
1456 }
1457 return hw_step;
1458 }
1459
1460 /* Resume the inferior, but allow a QUIT. This is useful if the user
1461 wants to interrupt some lengthy single-stepping operation
1462 (for child processes, the SIGINT goes to the inferior, and so
1463 we get a SIGINT random_signal, but for remote debugging and perhaps
1464 other targets, that's not true).
1465
1466 STEP nonzero if we should step (zero to continue instead).
1467 SIG is the signal to give the inferior (zero for none). */
1468 void
1469 resume (int step, enum target_signal sig)
1470 {
1471 int should_resume = 1;
1472 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1473 struct regcache *regcache = get_current_regcache ();
1474 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1475 struct thread_info *tp = inferior_thread ();
1476 CORE_ADDR pc = regcache_read_pc (regcache);
1477 struct address_space *aspace = get_regcache_aspace (regcache);
1478
1479 QUIT;
1480
1481 if (debug_infrun)
1482 fprintf_unfiltered (gdb_stdlog,
1483 "infrun: resume (step=%d, signal=%d), "
1484 "trap_expected=%d\n",
1485 step, sig, tp->trap_expected);
1486
1487 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1488 over an instruction that causes a page fault without triggering
1489 a hardware watchpoint. The kernel properly notices that it shouldn't
1490 stop, because the hardware watchpoint is not triggered, but it forgets
1491 the step request and continues the program normally.
1492 Work around the problem by removing hardware watchpoints if a step is
1493 requested; GDB will check for a hardware watchpoint trigger after the
1494 step anyway. */
1495 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1496 remove_hw_watchpoints ();
1497
1498
1499 /* Normally, by the time we reach `resume', the breakpoints are either
1500 removed or inserted, as appropriate. The exception is if we're sitting
1501 at a permanent breakpoint; we need to step over it, but permanent
1502 breakpoints can't be removed. So we have to test for it here. */
1503 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1504 {
1505 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1506 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1507 else
1508 error (_("\
1509 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1510 how to step past a permanent breakpoint on this architecture. Try using\n\
1511 a command like `return' or `jump' to continue execution."));
1512 }
1513
1514 /* If enabled, step over breakpoints by executing a copy of the
1515 instruction at a different address.
1516
1517 We can't use displaced stepping when we have a signal to deliver;
1518 the comments for displaced_step_prepare explain why. The
1519 comments in handle_inferior_event for dealing with 'random
1520 signals' explain what we do instead. */
1521 if (use_displaced_stepping (gdbarch)
1522 && (tp->trap_expected
1523 || (step && gdbarch_software_single_step_p (gdbarch)))
1524 && sig == TARGET_SIGNAL_0)
1525 {
1526 struct displaced_step_inferior_state *displaced;
1527
1528 if (!displaced_step_prepare (inferior_ptid))
1529 {
1530 /* Got placed in displaced stepping queue. Will be resumed
1531 later when all the currently queued displaced stepping
1532 requests finish. The thread is not executing at this point,
1533 and the call to set_executing will be made later. But we
1534 need to call set_running here, since from the frontend's point of view,
1535 the thread is running. */
1536 set_running (inferior_ptid, 1);
1537 discard_cleanups (old_cleanups);
1538 return;
1539 }
1540
1541 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1542 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1543 displaced->step_closure);
1544 }
1545
1546 /* Do we need to do it the hard way, w/temp breakpoints? */
1547 else if (step)
1548 step = maybe_software_singlestep (gdbarch, pc);
1549
1550 if (should_resume)
1551 {
1552 ptid_t resume_ptid;
1553
1554 /* If STEP is set, it's a request to use hardware stepping
1555 facilities. But in that case, we should never
1556 use a singlestep breakpoint. */
1557 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1558
1559 /* Decide the set of threads to ask the target to resume. Start
1560 by assuming everything will be resumed, then narrow the set
1561 by applying increasingly restrictive conditions. */
1562
1563 /* By default, resume all threads of all processes. */
1564 resume_ptid = RESUME_ALL;
1565
1566 /* Maybe resume only all threads of the current process. */
1567 if (!sched_multi && target_supports_multi_process ())
1568 {
1569 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1570 }
1571
1572 /* Maybe resume a single thread after all. */
1573 if (singlestep_breakpoints_inserted_p
1574 && stepping_past_singlestep_breakpoint)
1575 {
1576 /* The situation here is as follows. In thread T1 we wanted to
1577 single-step. Lacking hardware single-stepping, we've set a
1578 breakpoint at the PC of the next instruction -- call it
1579 P. After resuming, we've hit that breakpoint in thread T2.
1580 Now we've removed the original breakpoint, inserted a breakpoint
1581 at P+1, and are trying to step to advance T2 past the breakpoint.
1582 We need to step only T2: if T1 is allowed to run freely,
1583 it can run past P, and if other threads are allowed to run,
1584 they can hit the breakpoint at P+1, and nested hits of single-step
1585 breakpoints are not something we'd want -- that's complicated
1586 to support, and has no value. */
1587 resume_ptid = inferior_ptid;
1588 }
1589 else if ((step || singlestep_breakpoints_inserted_p)
1590 && tp->trap_expected)
1591 {
1592 /* We're allowing a thread to run past a breakpoint it has
1593 hit, by single-stepping the thread with the breakpoint
1594 removed. In which case, we need to single-step only this
1595 thread, and keep others stopped, as they can miss this
1596 breakpoint if allowed to run.
1597
1598 The current code actually removes all breakpoints when
1599 doing this, not just the one being stepped over, so if we
1600 let other threads run, we can actually miss any
1601 breakpoint, not just the one at PC. */
1602 resume_ptid = inferior_ptid;
1603 }
1604 else if (non_stop)
1605 {
1606 /* With non-stop mode on, threads are always handled
1607 individually. */
1608 resume_ptid = inferior_ptid;
1609 }
1610 else if ((scheduler_mode == schedlock_on)
1611 || (scheduler_mode == schedlock_step
1612 && (step || singlestep_breakpoints_inserted_p)))
1613 {
1614 /* User-settable 'scheduler' mode requires solo thread resume. */
1615 resume_ptid = inferior_ptid;
1616 }
1617
1618 if (gdbarch_cannot_step_breakpoint (gdbarch))
1619 {
1620 /* Most targets can step a breakpoint instruction, thus
1621 executing it normally. But if this one cannot, just
1622 continue and we will hit it anyway. */
1623 if (step && breakpoint_inserted_here_p (aspace, pc))
1624 step = 0;
1625 }
1626
1627 if (debug_displaced
1628 && use_displaced_stepping (gdbarch)
1629 && tp->trap_expected)
1630 {
1631 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1632 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1633 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1634 gdb_byte buf[4];
1635
1636 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1637 paddress (resume_gdbarch, actual_pc));
1638 read_memory (actual_pc, buf, sizeof (buf));
1639 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1640 }
1641
1642 /* Install inferior's terminal modes. */
1643 target_terminal_inferior ();
1644
1645 /* Avoid confusing the next resume, if the next stop/resume
1646 happens to apply to another thread. */
1647 tp->stop_signal = TARGET_SIGNAL_0;
1648
1649 target_resume (resume_ptid, step, sig);
1650 }
1651
1652 discard_cleanups (old_cleanups);
1653 }
1654 \f
1655 /* Proceeding. */
1656
1657 /* Clear out all variables saying what to do when the inferior is continued.
1658 First do this, then set the ones you want, then call `proceed'. */
1659
1660 static void
1661 clear_proceed_status_thread (struct thread_info *tp)
1662 {
1663 if (debug_infrun)
1664 fprintf_unfiltered (gdb_stdlog,
1665 "infrun: clear_proceed_status_thread (%s)\n",
1666 target_pid_to_str (tp->ptid));
1667
1668 tp->trap_expected = 0;
1669 tp->step_range_start = 0;
1670 tp->step_range_end = 0;
1671 tp->step_frame_id = null_frame_id;
1672 tp->step_stack_frame_id = null_frame_id;
1673 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1674 tp->stop_requested = 0;
1675
1676 tp->stop_step = 0;
1677
1678 tp->proceed_to_finish = 0;
1679
1680 /* Discard any remaining commands or status from previous stop. */
1681 bpstat_clear (&tp->stop_bpstat);
1682 }
1683
1684 static int
1685 clear_proceed_status_callback (struct thread_info *tp, void *data)
1686 {
1687 if (is_exited (tp->ptid))
1688 return 0;
1689
1690 clear_proceed_status_thread (tp);
1691 return 0;
1692 }
1693
1694 void
1695 clear_proceed_status (void)
1696 {
1697 if (!non_stop)
1698 {
1699 /* In all-stop mode, delete the per-thread status of all
1700 threads, even if inferior_ptid is null_ptid; there may still
1701 be threads on the list. E.g., we may be launching a new
1702 process while selecting the executable. */
1703 iterate_over_threads (clear_proceed_status_callback, NULL);
1704 }
1705
1706 if (!ptid_equal (inferior_ptid, null_ptid))
1707 {
1708 struct inferior *inferior;
1709
1710 if (non_stop)
1711 {
1712 /* If in non-stop mode, only delete the per-thread status of
1713 the current thread. */
1714 clear_proceed_status_thread (inferior_thread ());
1715 }
1716
1717 inferior = current_inferior ();
1718 inferior->stop_soon = NO_STOP_QUIETLY;
1719 }
1720
1721 stop_after_trap = 0;
1722
1723 observer_notify_about_to_proceed ();
1724
1725 if (stop_registers)
1726 {
1727 regcache_xfree (stop_registers);
1728 stop_registers = NULL;
1729 }
1730 }
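
/* Editor's illustration -- not part of the original infrun.c.  The
   iterate_over_threads callback convention used above: the callback is
   invoked for each known thread and returns zero to keep iterating
   (returning nonzero stops the walk at that thread).  A hypothetical
   callback mirroring the skip-exited-threads pattern above:  */

static int
example_thread_callback (struct thread_info *tp, void *data)
{
  if (is_exited (tp->ptid))
    return 0;                   /* exited thread: nothing to do, keep going */

  /* ... operate on TP here ...  */

  return 0;                     /* continue with the remaining threads */
}

/* Typical use, as above:  iterate_over_threads (example_thread_callback, NULL);  */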
1731
1732 /* Check the current thread against the thread that reported the most recent
1733 event. If a step-over is required, return TRUE and set the current thread
1734 to the old thread. Otherwise return FALSE.
1735
1736 This should be suitable for any targets that support threads. */
1737
1738 static int
1739 prepare_to_proceed (int step)
1740 {
1741 ptid_t wait_ptid;
1742 struct target_waitstatus wait_status;
1743 int schedlock_enabled;
1744
1745 /* With non-stop mode on, threads are always handled individually. */
1746 gdb_assert (! non_stop);
1747
1748 /* Get the last target status returned by target_wait(). */
1749 get_last_target_status (&wait_ptid, &wait_status);
1750
1751 /* Make sure we were stopped at a breakpoint. */
1752 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1753 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1754 && wait_status.value.sig != TARGET_SIGNAL_ILL
1755 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1756 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1757 {
1758 return 0;
1759 }
1760
1761 schedlock_enabled = (scheduler_mode == schedlock_on
1762 || (scheduler_mode == schedlock_step
1763 && step));
1764
1765 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1766 if (schedlock_enabled)
1767 return 0;
1768
1769 /* Don't switch over if we're about to resume some other process
1770 other than WAIT_PTID's, and schedule-multiple is off. */
1771 if (!sched_multi
1772 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1773 return 0;
1774
1775 /* If the last event came from a different thread (WAIT_PTID), consider switching over to it. */
1776 if (!ptid_equal (wait_ptid, minus_one_ptid)
1777 && !ptid_equal (inferior_ptid, wait_ptid))
1778 {
1779 struct regcache *regcache = get_thread_regcache (wait_ptid);
1780
1781 if (breakpoint_here_p (get_regcache_aspace (regcache),
1782 regcache_read_pc (regcache)))
1783 {
1784 /* If stepping, remember current thread to switch back to. */
1785 if (step)
1786 deferred_step_ptid = inferior_ptid;
1787
1788 /* Switch back to WAIT_PTID's thread. */
1789 switch_to_thread (wait_ptid);
1790
1791 /* We return 1 to indicate that there is a breakpoint here,
1792 so we need to step over it before continuing to avoid
1793 hitting it straight away. */
1794 return 1;
1795 }
1796 }
1797
1798 return 0;
1799 }
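
/* Editor's illustration -- not part of the original infrun.c.  A
   boolean model of the decision prepare_to_proceed makes above.  The
   parameter names are hypothetical; the real function also switches to
   the event thread as a side effect when it returns TRUE.  */

#include <stdbool.h>

static bool
model_step_over_needed (bool stopped_with_trap_like_signal,
                        bool schedlock_enabled,
                        bool event_in_other_process, bool sched_multi,
                        bool event_thread_differs,
                        bool breakpoint_at_event_pc)
{
  if (!stopped_with_trap_like_signal)
    return false;
  if (schedlock_enabled)
    return false;               /* never switch threads under scheduler locking */
  if (event_in_other_process && !sched_multi)
    return false;               /* don't hop processes unless schedule-multiple */

  return event_thread_differs && breakpoint_at_event_pc;
}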
1800
1801 /* Basic routine for continuing the program in various fashions.
1802
1803 ADDR is the address to resume at, or -1 for resume where stopped.
1804 SIGGNAL is the signal to give it, or 0 for none,
1805 or -1 to act according to how it stopped.
1806 STEP is nonzero if the inferior should trap after one instruction;
1807 a STEP of -1 means return after that and print nothing.
1808 You should probably set various step_... variables
1809 before calling here, if you are stepping.
1810
1811 You should call clear_proceed_status before calling proceed. */
1812
1813 void
1814 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1815 {
1816 struct regcache *regcache;
1817 struct gdbarch *gdbarch;
1818 struct thread_info *tp;
1819 CORE_ADDR pc;
1820 struct address_space *aspace;
1821 int oneproc = 0;
1822
1823 /* If we're stopped at a fork/vfork, follow the branch set by the
1824 "set follow-fork-mode" command; otherwise, we'll just proceed
1825 resuming the current thread. */
1826 if (!follow_fork ())
1827 {
1828 /* The target for some reason decided not to resume. */
1829 normal_stop ();
1830 return;
1831 }
1832
1833 regcache = get_current_regcache ();
1834 gdbarch = get_regcache_arch (regcache);
1835 aspace = get_regcache_aspace (regcache);
1836 pc = regcache_read_pc (regcache);
1837
1838 if (step > 0)
1839 step_start_function = find_pc_function (pc);
1840 if (step < 0)
1841 stop_after_trap = 1;
1842
1843 if (addr == (CORE_ADDR) -1)
1844 {
1845 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1846 && execution_direction != EXEC_REVERSE)
1847 /* There is a breakpoint at the address we will resume at;
1848 step one instruction before inserting breakpoints so that
1849 we do not stop right away (and report a second hit at this
1850 breakpoint).
1851
1852 Note, we don't do this in reverse, because we won't
1853 actually be executing the breakpoint insn anyway.
1854 We'll be (un-)executing the previous instruction. */
1855
1856 oneproc = 1;
1857 else if (gdbarch_single_step_through_delay_p (gdbarch)
1858 && gdbarch_single_step_through_delay (gdbarch,
1859 get_current_frame ()))
1860 /* We stepped onto an instruction that needs to be stepped
1861 again before re-inserting the breakpoint, do so. */
1862 oneproc = 1;
1863 }
1864 else
1865 {
1866 regcache_write_pc (regcache, addr);
1867 }
1868
1869 if (debug_infrun)
1870 fprintf_unfiltered (gdb_stdlog,
1871 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1872 paddress (gdbarch, addr), siggnal, step);
1873
1874 /* We're handling a live event, so make sure we're doing live
1875 debugging. If we're looking at traceframes while the target is
1876 running, we're going to need to get back to that mode after
1877 handling the event. */
1878 if (non_stop)
1879 {
1880 make_cleanup_restore_current_traceframe ();
1881 set_traceframe_number (-1);
1882 }
1883
1884 if (non_stop)
1885 /* In non-stop, each thread is handled individually. The context
1886 must already be set to the right thread here. */
1887 ;
1888 else
1889 {
1890 /* In a multi-threaded task we may select another thread and
1891 then continue or step.
1892
1893 But if the old thread was stopped at a breakpoint, it will
1894 immediately cause another breakpoint stop without any
1895 execution (i.e. it will report a breakpoint hit incorrectly).
1896 So we must step over it first.
1897
1898 prepare_to_proceed checks the current thread against the
1899 thread that reported the most recent event. If a step-over
1900 is required it returns TRUE and sets the current thread to
1901 the old thread. */
1902 if (prepare_to_proceed (step))
1903 oneproc = 1;
1904 }
1905
1906 /* prepare_to_proceed may change the current thread. */
1907 tp = inferior_thread ();
1908
1909 if (oneproc)
1910 {
1911 tp->trap_expected = 1;
1912 /* If displaced stepping is enabled, we can step over the
1913 breakpoint without hitting it, so leave all breakpoints
1914 inserted. Otherwise we need to disable all breakpoints, step
1915 one instruction, and then re-add them when that step is
1916 finished. */
1917 if (!use_displaced_stepping (gdbarch))
1918 remove_breakpoints ();
1919 }
1920
1921 /* We can insert breakpoints if we're not trying to step over one,
1922 or if we are stepping over one but we're using displaced stepping
1923 to do so. */
1924 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1925 insert_breakpoints ();
1926
1927 if (!non_stop)
1928 {
1929 /* Pass the last stop signal to the thread we're resuming,
1930 irrespective of whether the current thread is the thread that
1931 got the last event or not. This was historically GDB's
1932 behaviour before keeping a stop_signal per thread. */
1933
1934 struct thread_info *last_thread;
1935 ptid_t last_ptid;
1936 struct target_waitstatus last_status;
1937
1938 get_last_target_status (&last_ptid, &last_status);
1939 if (!ptid_equal (inferior_ptid, last_ptid)
1940 && !ptid_equal (last_ptid, null_ptid)
1941 && !ptid_equal (last_ptid, minus_one_ptid))
1942 {
1943 last_thread = find_thread_ptid (last_ptid);
1944 if (last_thread)
1945 {
1946 tp->stop_signal = last_thread->stop_signal;
1947 last_thread->stop_signal = TARGET_SIGNAL_0;
1948 }
1949 }
1950 }
1951
1952 if (siggnal != TARGET_SIGNAL_DEFAULT)
1953 tp->stop_signal = siggnal;
1954 /* If this signal should not be seen by the program,
1955 give it zero. Used for debugging signals. */
1956 else if (!signal_program[tp->stop_signal])
1957 tp->stop_signal = TARGET_SIGNAL_0;
1958
1959 annotate_starting ();
1960
1961 /* Make sure that output from GDB appears before output from the
1962 inferior. */
1963 gdb_flush (gdb_stdout);
1964
1965 /* Refresh prev_pc value just prior to resuming. This used to be
1966 done in stop_stepping, however, setting prev_pc there did not handle
1967 scenarios such as inferior function calls or returning from
1968 a function via the return command. In those cases, the prev_pc
1969 value was not set properly for subsequent commands. The prev_pc value
1970 is used to initialize the starting line number in the ecs. With an
1971 invalid value, the gdb next command ends up stopping at the position
1972 represented by the next line table entry past our start position.
1973 On platforms that generate one line table entry per line, this
1974 is not a problem. However, on the ia64, the compiler generates
1975 extraneous line table entries that do not increase the line number.
1976 When we issue the gdb next command on the ia64 after an inferior call
1977 or a return command, we often end up a few instructions forward, still
1978 within the original line we started in.
1979
1980 An attempt was made to refresh the prev_pc at the same time the
1981 execution_control_state is initialized (for instance, just before
1982 waiting for an inferior event). But this approach did not work
1983 because of platforms that use ptrace, where the pc register cannot
1984 be read unless the inferior is stopped. At that point, we are not
1985 guaranteed the inferior is stopped and so the regcache_read_pc() call
1986 can fail. Setting the prev_pc value here ensures the value is updated
1987 correctly when the inferior is stopped. */
1988 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1989
1990 /* Fill in with reasonable starting values. */
1991 init_thread_stepping_state (tp);
1992
1993 /* Reset to normal state. */
1994 init_infwait_state ();
1995
1996 /* Resume inferior. */
1997 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1998
1999 /* Wait for it to stop (if not standalone)
2000 and in any case decode why it stopped, and act accordingly. */
2001 /* Do this only if we are not using the event loop, or if the target
2002 does not support asynchronous execution. */
2003 if (!target_can_async_p ())
2004 {
2005 wait_for_inferior (0);
2006 normal_stop ();
2007 }
2008 }
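
/* Editor's illustration -- not part of the original infrun.c.  The
   calling sequence implied by the comments above, in its simplest
   "just continue" form: clear the proceed status, set any step_*
   fields a stepping command needs, then proceed from where the
   inferior stopped with the default signal disposition.  This is a
   sketch, not a quote of any particular caller.  */

static void
example_plain_continue (void)
{
  clear_proceed_status ();

  /* A stepping command would fill in the current thread's step range,
     step_frame_id and friends here before resuming.  */

  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
}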
2009 \f
2010
2011 /* Start remote-debugging of a machine over a serial link. */
2012
2013 void
2014 start_remote (int from_tty)
2015 {
2016 struct inferior *inferior;
2017 init_wait_for_inferior ();
2018
2019 inferior = current_inferior ();
2020 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2021
2022 /* Always go on waiting for the target, regardless of the mode. */
2023 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2024 indicate to wait_for_inferior that a target should timeout if
2025 nothing is returned (instead of just blocking). Because of this,
2026 targets expecting an immediate response need to, internally, set
2027 things up so that the target_wait() is forced to eventually
2028 timeout. */
2029 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2030 differentiate to its caller what the state of the target is after
2031 the initial open has been performed. Here we're assuming that
2032 the target has stopped. It should be possible to eventually have
2033 target_open() return to the caller an indication that the target
2034 is currently running and GDB state should be set to the same as
2035 for an async run. */
2036 wait_for_inferior (0);
2037
2038 /* Now that the inferior has stopped, do any bookkeeping like
2039 loading shared libraries. We want to do this before normal_stop,
2040 so that the displayed frame is up to date. */
2041 post_create_inferior (&current_target, from_tty);
2042
2043 normal_stop ();
2044 }
2045
2046 /* Initialize static vars when a new inferior begins. */
2047
2048 void
2049 init_wait_for_inferior (void)
2050 {
2051 /* These are meaningless until the first time through wait_for_inferior. */
2052
2053 breakpoint_init_inferior (inf_starting);
2054
2055 clear_proceed_status ();
2056
2057 stepping_past_singlestep_breakpoint = 0;
2058 deferred_step_ptid = null_ptid;
2059
2060 target_last_wait_ptid = minus_one_ptid;
2061
2062 previous_inferior_ptid = null_ptid;
2063 init_infwait_state ();
2064
2065 /* Discard any skipped inlined frames. */
2066 clear_inline_frame_state (minus_one_ptid);
2067 }
2068
2069 \f
2070 /* This enum encodes possible reasons for doing a target_wait, so that
2071 wfi can call target_wait in one place. (Ultimately the call will be
2072 moved out of the infinite loop entirely.) */
2073
2074 enum infwait_states
2075 {
2076 infwait_normal_state,
2077 infwait_thread_hop_state,
2078 infwait_step_watch_state,
2079 infwait_nonstep_watch_state
2080 };
2081
2082 /* Why did the inferior stop? Used to print the appropriate messages
2083 to the interface from within handle_inferior_event(). */
2084 enum inferior_stop_reason
2085 {
2086 /* Step, next, nexti, stepi finished. */
2087 END_STEPPING_RANGE,
2088 /* Inferior terminated by signal. */
2089 SIGNAL_EXITED,
2090 /* Inferior exited. */
2091 EXITED,
2092 /* Inferior received signal, and user asked to be notified. */
2093 SIGNAL_RECEIVED,
2094 /* Reverse execution -- target ran out of history info. */
2095 NO_HISTORY
2096 };
2097
2098 /* The PTID we'll do a target_wait on. */
2099 ptid_t waiton_ptid;
2100
2101 /* Current inferior wait state. */
2102 enum infwait_states infwait_state;
2103
2104 /* Data to be passed around while handling an event. This data is
2105 discarded between events. */
2106 struct execution_control_state
2107 {
2108 ptid_t ptid;
2109 /* The thread that got the event, if this was a thread event; NULL
2110 otherwise. */
2111 struct thread_info *event_thread;
2112
2113 struct target_waitstatus ws;
2114 int random_signal;
2115 CORE_ADDR stop_func_start;
2116 CORE_ADDR stop_func_end;
2117 char *stop_func_name;
2118 int new_thread_event;
2119 int wait_some_more;
2120 };
2121
2122 static void handle_inferior_event (struct execution_control_state *ecs);
2123
2124 static void handle_step_into_function (struct gdbarch *gdbarch,
2125 struct execution_control_state *ecs);
2126 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2127 struct execution_control_state *ecs);
2128 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2129 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2130 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2131 struct symtab_and_line sr_sal,
2132 struct frame_id sr_id);
2133 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2134
2135 static void stop_stepping (struct execution_control_state *ecs);
2136 static void prepare_to_wait (struct execution_control_state *ecs);
2137 static void keep_going (struct execution_control_state *ecs);
2138 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2139 int stop_info);
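
/* Editor's illustration -- not part of the original infrun.c.  An
   execution_control_state is a short-lived, zero-initialized stack
   object: the wait loops fill it from target_wait, and (as the
   stop-requested callback below does) one can also be synthesized by
   hand to report a stop that did not come from the target.  Sketch
   only; error-handling cleanups are omitted.  */

static void
example_report_synthetic_stop (struct thread_info *info)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  memset (ecs, 0, sizeof (*ecs));
  ecs->ptid = info->ptid;
  ecs->event_thread = find_thread_ptid (info->ptid);
  ecs->ws.kind = TARGET_WAITKIND_STOPPED;
  ecs->ws.value.sig = TARGET_SIGNAL_0;

  handle_inferior_event (ecs);
  /* The caller then checks ecs->wait_some_more to decide whether the
     thread needs to keep waiting for further events.  */
}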
2140
2141 /* Callback for iterate_over_threads. If the thread is stopped, but
2142 the user/frontend doesn't know about that yet, go through
2143 normal_stop, as if the thread had just stopped now. ARG points at
2144 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2145 ptid_is_pid(PTID) is true, applies to all threads of the process
2146 pointed at by PTID. Otherwise, apply only to the thread pointed at
2147 by PTID. */
2148
2149 static int
2150 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2151 {
2152 ptid_t ptid = * (ptid_t *) arg;
2153
2154 if ((ptid_equal (info->ptid, ptid)
2155 || ptid_equal (minus_one_ptid, ptid)
2156 || (ptid_is_pid (ptid)
2157 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2158 && is_running (info->ptid)
2159 && !is_executing (info->ptid))
2160 {
2161 struct cleanup *old_chain;
2162 struct execution_control_state ecss;
2163 struct execution_control_state *ecs = &ecss;
2164
2165 memset (ecs, 0, sizeof (*ecs));
2166
2167 old_chain = make_cleanup_restore_current_thread ();
2168
2169 switch_to_thread (info->ptid);
2170
2171 /* Go through handle_inferior_event/normal_stop, so we always
2172 have consistent output as if the stop event had been
2173 reported. */
2174 ecs->ptid = info->ptid;
2175 ecs->event_thread = find_thread_ptid (info->ptid);
2176 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2177 ecs->ws.value.sig = TARGET_SIGNAL_0;
2178
2179 handle_inferior_event (ecs);
2180
2181 if (!ecs->wait_some_more)
2182 {
2183 struct thread_info *tp;
2184
2185 normal_stop ();
2186
2187 /* Finish off the continuations. The continuations
2188 themselves are responsible for realising the thread
2189 didn't finish what it was supposed to do. */
2190 tp = inferior_thread ();
2191 do_all_intermediate_continuations_thread (tp);
2192 do_all_continuations_thread (tp);
2193 }
2194
2195 do_cleanups (old_chain);
2196 }
2197
2198 return 0;
2199 }
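
/* Editor's illustration -- not part of the original infrun.c.  A
   standalone model of the PTID filter used above: minus-one matches
   every thread, a process-wide (pid-only) PTID matches every thread of
   that process, and anything else must match exactly.  The struct is a
   hypothetical stand-in for GDB's ptid_t.  */

#include <stdbool.h>

struct model_ptid
{
  int pid;    /* -1 means "all processes" */
  long tid;   /* 0 means "the whole process" */
};

static bool
model_ptid_filter_matches (struct model_ptid thread, struct model_ptid filter)
{
  if (filter.pid == -1)
    return true;                               /* minus_one_ptid */
  if (filter.tid == 0)
    return thread.pid == filter.pid;           /* process-wide filter */
  return thread.pid == filter.pid && thread.tid == filter.tid;
}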
2200
2201 /* This function is attached as a "thread_stop_requested" observer.
2202 Clean up local state that assumed the PTID was to be resumed, and
2203 report the stop to the frontend. */
2204
2205 static void
2206 infrun_thread_stop_requested (ptid_t ptid)
2207 {
2208 struct displaced_step_inferior_state *displaced;
2209
2210 /* PTID was requested to stop. Remove it from the displaced
2211 stepping queue, so we don't try to resume it automatically. */
2212
2213 for (displaced = displaced_step_inferior_states;
2214 displaced;
2215 displaced = displaced->next)
2216 {
2217 struct displaced_step_request *it, **prev_next_p;
2218
2219 it = displaced->step_request_queue;
2220 prev_next_p = &displaced->step_request_queue;
2221 while (it)
2222 {
2223 if (ptid_match (it->ptid, ptid))
2224 {
2225 *prev_next_p = it->next;
2226 it->next = NULL;
2227 xfree (it);
2228 }
2229 else
2230 {
2231 prev_next_p = &it->next;
2232 }
2233
2234 it = *prev_next_p;
2235 }
2236 }
2237
2238 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2239 }
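
/* Editor's illustration -- not part of the original infrun.c.  The
   unlinking idiom used on the displaced-step request queue above, shown
   on a generic singly linked list: keep a pointer to the previous
   node's "next" field so matching nodes can be removed and freed
   without special-casing the head of the list.  */

#include <stdlib.h>

struct model_node
{
  int key;
  struct model_node *next;
};

static void
model_remove_matching (struct model_node **head, int key)
{
  struct model_node **prev_next_p = head;
  struct model_node *it = *head;

  while (it)
    {
      if (it->key == key)
        {
          *prev_next_p = it->next;    /* unlink IT */
          free (it);
        }
      else
        prev_next_p = &it->next;      /* keep IT; advance the anchor */

      it = *prev_next_p;
    }
}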
2240
2241 static void
2242 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2243 {
2244 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2245 nullify_last_target_wait_ptid ();
2246 }
2247
2248 /* Callback for iterate_over_threads. */
2249
2250 static int
2251 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2252 {
2253 if (is_exited (info->ptid))
2254 return 0;
2255
2256 delete_step_resume_breakpoint (info);
2257 return 0;
2258 }
2259
2260 /* In all-stop, delete the step resume breakpoint of any thread that
2261 had one. In non-stop, delete the step resume breakpoint of the
2262 thread that just stopped. */
2263
2264 static void
2265 delete_step_thread_step_resume_breakpoint (void)
2266 {
2267 if (!target_has_execution
2268 || ptid_equal (inferior_ptid, null_ptid))
2269 /* If the inferior has exited, we have already deleted the step
2270 resume breakpoints out of GDB's lists. */
2271 return;
2272
2273 if (non_stop)
2274 {
2275 /* If in non-stop mode, only delete the step-resume or
2276 longjmp-resume breakpoint of the thread that just stopped
2277 stepping. */
2278 struct thread_info *tp = inferior_thread ();
2279 delete_step_resume_breakpoint (tp);
2280 }
2281 else
2282 /* In all-stop mode, delete all step-resume and longjmp-resume
2283 breakpoints of any thread that had them. */
2284 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2285 }
2286
2287 /* A cleanup wrapper. */
2288
2289 static void
2290 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2291 {
2292 delete_step_thread_step_resume_breakpoint ();
2293 }
2294
2295 /* Pretty print the results of target_wait, for debugging purposes. */
2296
2297 static void
2298 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2299 const struct target_waitstatus *ws)
2300 {
2301 char *status_string = target_waitstatus_to_string (ws);
2302 struct ui_file *tmp_stream = mem_fileopen ();
2303 char *text;
2304
2305 /* The text is split over several lines because it was getting too long.
2306 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2307 output as a unit; we want only one timestamp printed if debug_timestamp
2308 is set. */
2309
2310 fprintf_unfiltered (tmp_stream,
2311 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2312 if (PIDGET (waiton_ptid) != -1)
2313 fprintf_unfiltered (tmp_stream,
2314 " [%s]", target_pid_to_str (waiton_ptid));
2315 fprintf_unfiltered (tmp_stream, ", status) =\n");
2316 fprintf_unfiltered (tmp_stream,
2317 "infrun: %d [%s],\n",
2318 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2319 fprintf_unfiltered (tmp_stream,
2320 "infrun: %s\n",
2321 status_string);
2322
2323 text = ui_file_xstrdup (tmp_stream, NULL);
2324
2325 /* This uses %s in part to handle %'s in the text, but also to avoid
2326 a gcc error: the format attribute requires a string literal. */
2327 fprintf_unfiltered (gdb_stdlog, "%s", text);
2328
2329 xfree (status_string);
2330 xfree (text);
2331 ui_file_delete (tmp_stream);
2332 }
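
/* Editor's illustration -- not part of the original infrun.c.  The
   mem_fileopen buffer above exists so that the multi-line message
   reaches the log stream in a single write and therefore picks up at
   most one timestamp when debug timestamping is enabled.  A plain-C
   sketch of the same idea, with a hypothetical helper name:  */

#include <stdio.h>

static void
model_log_as_one_write (FILE *log, int pid, const char *status)
{
  char buf[256];

  snprintf (buf, sizeof (buf),
            "infrun: target_wait (%d, status) =\n"
            "infrun: %s\n",
            pid, status);
  fputs (buf, log);   /* one write, hence one timestamp at most */
}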
2333
2334 /* Prepare and stabilize the inferior for detaching it. E.g.,
2335 detaching while a thread is displaced stepping is a recipe for
2336 crashing it, as nothing would readjust the PC out of the scratch
2337 pad. */
2338
2339 void
2340 prepare_for_detach (void)
2341 {
2342 struct inferior *inf = current_inferior ();
2343 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2344 struct cleanup *old_chain_1;
2345 struct displaced_step_inferior_state *displaced;
2346
2347 displaced = get_displaced_stepping_state (inf->pid);
2348
2349 /* Is any thread of this process displaced stepping? If not,
2350 there's nothing else to do. */
2351 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2352 return;
2353
2354 if (debug_infrun)
2355 fprintf_unfiltered (gdb_stdlog,
2356 "displaced-stepping in-process while detaching");
2357
2358 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2359 inf->detaching = 1;
2360
2361 while (!ptid_equal (displaced->step_ptid, null_ptid))
2362 {
2363 struct cleanup *old_chain_2;
2364 struct execution_control_state ecss;
2365 struct execution_control_state *ecs;
2366
2367 ecs = &ecss;
2368 memset (ecs, 0, sizeof (*ecs));
2369
2370 overlay_cache_invalid = 1;
2371
2372 /* We have to invalidate the registers BEFORE calling
2373 target_wait because they can be loaded from the target while
2374 in target_wait. This makes remote debugging a bit more
2375 efficient for those targets that provide critical registers
2376 as part of their normal status mechanism. */
2377
2378 registers_changed ();
2379
2380 if (deprecated_target_wait_hook)
2381 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2382 else
2383 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2384
2385 if (debug_infrun)
2386 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2387
2388 /* If an error happens while handling the event, propagate GDB's
2389 knowledge of the executing state to the frontend/user running
2390 state. */
2391 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2392
2393 /* Now figure out what to do with the result. */
2394 handle_inferior_event (ecs);
2395
2396 /* No error, don't finish the state yet. */
2397 discard_cleanups (old_chain_2);
2398
2399 /* Breakpoints and watchpoints are not installed on the target
2400 at this point, and signals are passed directly to the
2401 inferior, so this must mean the process is gone. */
2402 if (!ecs->wait_some_more)
2403 {
2404 discard_cleanups (old_chain_1);
2405 error (_("Program exited while detaching"));
2406 }
2407 }
2408
2409 discard_cleanups (old_chain_1);
2410 }
2411
2412 /* Wait for control to return from inferior to debugger.
2413
2414 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2415 as if they were SIGTRAP signals. This can be useful during
2416 the startup sequence on some targets such as HP/UX, where
2417 we receive an EXEC event instead of the expected SIGTRAP.
2418
2419 If the inferior gets a signal, we may decide to start it up again
2420 instead of returning. That is why there is a loop in this function.
2421 When this function actually returns it means the inferior
2422 should be left stopped and GDB should read more commands. */
2423
2424 void
2425 wait_for_inferior (int treat_exec_as_sigtrap)
2426 {
2427 struct cleanup *old_cleanups;
2428 struct execution_control_state ecss;
2429 struct execution_control_state *ecs;
2430
2431 if (debug_infrun)
2432 fprintf_unfiltered
2433 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2434 treat_exec_as_sigtrap);
2435
2436 old_cleanups =
2437 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2438
2439 ecs = &ecss;
2440 memset (ecs, 0, sizeof (*ecs));
2441
2442 /* We'll update this if & when we switch to a new thread. */
2443 previous_inferior_ptid = inferior_ptid;
2444
2445 while (1)
2446 {
2447 struct cleanup *old_chain;
2448
2449 /* We have to invalidate the registers BEFORE calling target_wait
2450 because they can be loaded from the target while in target_wait.
2451 This makes remote debugging a bit more efficient for those
2452 targets that provide critical registers as part of their normal
2453 status mechanism. */
2454
2455 overlay_cache_invalid = 1;
2456 registers_changed ();
2457
2458 if (deprecated_target_wait_hook)
2459 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2460 else
2461 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2462
2463 if (debug_infrun)
2464 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2465
2466 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2467 {
2468 xfree (ecs->ws.value.execd_pathname);
2469 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2470 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2471 }
2472
2473 /* If an error happens while handling the event, propagate GDB's
2474 knowledge of the executing state to the frontend/user running
2475 state. */
2476 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2477
2478 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2479 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2480 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2481
2482 /* Now figure out what to do with the result. */
2483 handle_inferior_event (ecs);
2484
2485 /* No error, don't finish the state yet. */
2486 discard_cleanups (old_chain);
2487
2488 if (!ecs->wait_some_more)
2489 break;
2490 }
2491
2492 do_cleanups (old_cleanups);
2493 }
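
/* Editor's illustration -- not part of the original infrun.c.  The
   skeleton shared by the synchronous loop above and the event-loop
   entry point below: invalidate cached registers, ask the target for an
   event, hand it to handle_inferior_event, and repeat while the handler
   asks to wait some more.  Cleanups and the deprecated wait hook are
   omitted from this sketch.  */

static void
example_event_loop_skeleton (void)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  memset (ecs, 0, sizeof (*ecs));

  while (1)
    {
      overlay_cache_invalid = 1;
      registers_changed ();     /* registers may be refetched inside target_wait */

      ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
        break;
    }
}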
2494
2495 /* Asynchronous version of wait_for_inferior. It is called by the
2496 event loop whenever a change of state is detected on the file
2497 descriptor corresponding to the target. It can be called more than
2498 once to complete a single execution command. In such cases we need
2499 to keep the state in a global variable ECSS. If it is the last time
2500 that this function is called for a single execution command, then
2501 report to the user that the inferior has stopped, and do the
2502 necessary cleanups. */
2503
2504 void
2505 fetch_inferior_event (void *client_data)
2506 {
2507 struct execution_control_state ecss;
2508 struct execution_control_state *ecs = &ecss;
2509 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2510 struct cleanup *ts_old_chain;
2511 int was_sync = sync_execution;
2512
2513 memset (ecs, 0, sizeof (*ecs));
2514
2515 /* We'll update this if & when we switch to a new thread. */
2516 previous_inferior_ptid = inferior_ptid;
2517
2518 if (non_stop)
2519 /* In non-stop mode, the user/frontend should not notice a thread
2520 switch due to internal events. Make sure we revert to the
2521 user-selected thread and frame after handling the event and
2522 running any breakpoint commands. */
2523 make_cleanup_restore_current_thread ();
2524
2525 /* We have to invalidate the registers BEFORE calling target_wait
2526 because they can be loaded from the target while in target_wait.
2527 This makes remote debugging a bit more efficient for those
2528 targets that provide critical registers as part of their normal
2529 status mechanism. */
2530
2531 overlay_cache_invalid = 1;
2532 registers_changed ();
2533
2534 if (deprecated_target_wait_hook)
2535 ecs->ptid =
2536 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2537 else
2538 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2539
2540 if (debug_infrun)
2541 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2542
2543 if (non_stop
2544 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2545 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2546 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2547 /* In non-stop mode, each thread is handled individually. Switch
2548 early, so the global state is set correctly for this
2549 thread. */
2550 context_switch (ecs->ptid);
2551
2552 /* If an error happens while handling the event, propagate GDB's
2553 knowledge of the executing state to the frontend/user running
2554 state. */
2555 if (!non_stop)
2556 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2557 else
2558 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2559
2560 /* Now figure out what to do with the result. */
2561 handle_inferior_event (ecs);
2562
2563 if (!ecs->wait_some_more)
2564 {
2565 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2566
2567 delete_step_thread_step_resume_breakpoint ();
2568
2569 /* We may not find an inferior if this was a process exit. */
2570 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2571 normal_stop ();
2572
2573 if (target_has_execution
2574 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2575 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2576 && ecs->event_thread->step_multi
2577 && ecs->event_thread->stop_step)
2578 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2579 else
2580 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2581 }
2582
2583 /* No error, don't finish the thread states yet. */
2584 discard_cleanups (ts_old_chain);
2585
2586 /* Revert thread and frame. */
2587 do_cleanups (old_chain);
2588
2589 /* If the inferior was in sync execution mode, and now isn't,
2590 restore the prompt. */
2591 if (was_sync && !sync_execution)
2592 display_gdb_prompt (0);
2593 }
2594
2595 /* Record the frame and location we're currently stepping through. */
2596 void
2597 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2598 {
2599 struct thread_info *tp = inferior_thread ();
2600
2601 tp->step_frame_id = get_frame_id (frame);
2602 tp->step_stack_frame_id = get_stack_frame_id (frame);
2603
2604 tp->current_symtab = sal.symtab;
2605 tp->current_line = sal.line;
2606 }
2607
2608 /* Clear context switchable stepping state. */
2609
2610 void
2611 init_thread_stepping_state (struct thread_info *tss)
2612 {
2613 tss->stepping_over_breakpoint = 0;
2614 tss->step_after_step_resume_breakpoint = 0;
2615 tss->stepping_through_solib_after_catch = 0;
2616 tss->stepping_through_solib_catchpoints = NULL;
2617 }
2618
2619 /* Return the cached copy of the last pid/waitstatus returned by
2620 target_wait()/deprecated_target_wait_hook(). The data is actually
2621 cached by handle_inferior_event(), which gets called immediately
2622 after target_wait()/deprecated_target_wait_hook(). */
2623
2624 void
2625 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2626 {
2627 *ptidp = target_last_wait_ptid;
2628 *status = target_last_waitstatus;
2629 }
2630
2631 void
2632 nullify_last_target_wait_ptid (void)
2633 {
2634 target_last_wait_ptid = minus_one_ptid;
2635 }
2636
2637 /* Switch thread contexts. */
2638
2639 static void
2640 context_switch (ptid_t ptid)
2641 {
2642 if (debug_infrun)
2643 {
2644 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2645 target_pid_to_str (inferior_ptid));
2646 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2647 target_pid_to_str (ptid));
2648 }
2649
2650 switch_to_thread (ptid);
2651 }
2652
2653 static void
2654 adjust_pc_after_break (struct execution_control_state *ecs)
2655 {
2656 struct regcache *regcache;
2657 struct gdbarch *gdbarch;
2658 struct address_space *aspace;
2659 CORE_ADDR breakpoint_pc;
2660
2661 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2662 we aren't, just return.
2663
2664 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2665 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2666 implemented by software breakpoints should be handled through the normal
2667 breakpoint layer.
2668
2669 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2670 different signals (SIGILL or SIGEMT for instance), but it is less
2671 clear where the PC is pointing afterwards. It may not match
2672 gdbarch_decr_pc_after_break. I don't know any specific target that
2673 generates these signals at breakpoints (the code has been in GDB since at
2674 least 1992), so I cannot guess how to handle them here.
2675
2676 In earlier versions of GDB, a target with
2677 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2678 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2679 target with both of these set in GDB history, and it seems unlikely to be
2680 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2681
2682 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2683 return;
2684
2685 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2686 return;
2687
2688 /* In reverse execution, when a breakpoint is hit, the instruction
2689 under it has already been de-executed. The reported PC always
2690 points at the breakpoint address, so adjusting it further would
2691 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2692 architecture:
2693
2694 B1 0x08000000 : INSN1
2695 B2 0x08000001 : INSN2
2696 0x08000002 : INSN3
2697 PC -> 0x08000003 : INSN4
2698
2699 Say you're stopped at 0x08000003 as above. Reverse continuing
2700 from that point should hit B2 as below. Reading the PC when the
2701 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2702 been de-executed already.
2703
2704 B1 0x08000000 : INSN1
2705 B2 PC -> 0x08000001 : INSN2
2706 0x08000002 : INSN3
2707 0x08000003 : INSN4
2708
2709 We can't apply the same logic as for forward execution, because
2710 we would wrongly adjust the PC to 0x08000000, since there's a
2711 breakpoint at PC - 1. We'd then report a hit on B1, although
2712 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2713 behaviour. */
2714 if (execution_direction == EXEC_REVERSE)
2715 return;
2716
2717 /* If this target does not decrement the PC after breakpoints, then
2718 we have nothing to do. */
2719 regcache = get_thread_regcache (ecs->ptid);
2720 gdbarch = get_regcache_arch (regcache);
2721 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2722 return;
2723
2724 aspace = get_regcache_aspace (regcache);
2725
2726 /* Find the location where (if we've hit a breakpoint) the
2727 breakpoint would be. */
2728 breakpoint_pc = regcache_read_pc (regcache)
2729 - gdbarch_decr_pc_after_break (gdbarch);
2730
2731 /* Check whether there actually is a software breakpoint inserted at
2732 that location.
2733
2734 If in non-stop mode, a race condition is possible where we've
2735 removed a breakpoint, but stop events for that breakpoint were
2736 already queued and arrive later. To suppress those spurious
2737 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2738 and retire them after a number of stop events are reported. */
2739 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2740 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2741 {
2742 struct cleanup *old_cleanups = NULL;
2743 if (RECORD_IS_USED)
2744 old_cleanups = record_gdb_operation_disable_set ();
2745
2746 /* When using hardware single-step, a SIGTRAP is reported for both
2747 a completed single-step and a software breakpoint. Need to
2748 differentiate between the two, as the latter needs adjusting
2749 but the former does not.
2750
2751 The SIGTRAP can be due to a completed hardware single-step only if
2752 - we didn't insert software single-step breakpoints
2753 - the thread to be examined is still the current thread
2754 - this thread is currently being stepped
2755
2756 If any of these events did not occur, we must have stopped due
2757 to hitting a software breakpoint, and have to back up to the
2758 breakpoint address.
2759
2760 As a special case, we could have hardware single-stepped a
2761 software breakpoint. In this case (prev_pc == breakpoint_pc),
2762 we also need to back up to the breakpoint address. */
2763
2764 if (singlestep_breakpoints_inserted_p
2765 || !ptid_equal (ecs->ptid, inferior_ptid)
2766 || !currently_stepping (ecs->event_thread)
2767 || ecs->event_thread->prev_pc == breakpoint_pc)
2768 regcache_write_pc (regcache, breakpoint_pc);
2769
2770 if (RECORD_IS_USED)
2771 do_cleanups (old_cleanups);
2772 }
2773 }
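
/* Editor's illustration -- not part of the original infrun.c.  A worked
   example of the adjustment above on an architecture whose breakpoint
   instruction is one byte and whose reported PC points just past it
   (decr_pc_after_break == 1, as with the x86 "int3" breakpoint); the
   addresses are made up for the example:

       breakpoint inserted at     0x08048400
       PC reported with SIGTRAP   0x08048401
       breakpoint_pc              0x08048401 - 1 == 0x08048400

   Because a software breakpoint really is inserted at 0x08048400 (and
   execution is forward), the PC is rewound to 0x08048400 so the stop is
   attributed to that breakpoint.  The helper below only models the
   arithmetic.  */

#include <stdint.h>

static uint64_t
model_breakpoint_pc (uint64_t reported_pc, uint64_t decr_pc_after_break)
{
  return reported_pc - decr_pc_after_break;
}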
2774
2775 void
2776 init_infwait_state (void)
2777 {
2778 waiton_ptid = pid_to_ptid (-1);
2779 infwait_state = infwait_normal_state;
2780 }
2781
2782 void
2783 error_is_running (void)
2784 {
2785 error (_("\
2786 Cannot execute this command while the selected thread is running."));
2787 }
2788
2789 void
2790 ensure_not_running (void)
2791 {
2792 if (is_running (inferior_ptid))
2793 error_is_running ();
2794 }
2795
2796 static int
2797 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2798 {
2799 for (frame = get_prev_frame (frame);
2800 frame != NULL;
2801 frame = get_prev_frame (frame))
2802 {
2803 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2804 return 1;
2805 if (get_frame_type (frame) != INLINE_FRAME)
2806 break;
2807 }
2808
2809 return 0;
2810 }
2811
2812 /* Auxiliary function that handles syscall entry/return events.
2813 It returns 1 if the inferior should keep going (and GDB
2814 should ignore the event), or 0 if the event deserves to be
2815 processed. */
2816
2817 static int
2818 handle_syscall_event (struct execution_control_state *ecs)
2819 {
2820 struct regcache *regcache;
2821 struct gdbarch *gdbarch;
2822 int syscall_number;
2823
2824 if (!ptid_equal (ecs->ptid, inferior_ptid))
2825 context_switch (ecs->ptid);
2826
2827 regcache = get_thread_regcache (ecs->ptid);
2828 gdbarch = get_regcache_arch (regcache);
2829 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2830 stop_pc = regcache_read_pc (regcache);
2831
2832 target_last_waitstatus.value.syscall_number = syscall_number;
2833
2834 if (catch_syscall_enabled () > 0
2835 && catching_syscall_number (syscall_number) > 0)
2836 {
2837 if (debug_infrun)
2838 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2839 syscall_number);
2840
2841 ecs->event_thread->stop_bpstat
2842 = bpstat_stop_status (get_regcache_aspace (regcache),
2843 stop_pc, ecs->ptid);
2844 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2845
2846 if (!ecs->random_signal)
2847 {
2848 /* Catchpoint hit. */
2849 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2850 return 0;
2851 }
2852 }
2853
2854 /* If no catchpoint triggered for this, then keep going. */
2855 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2856 keep_going (ecs);
2857 return 1;
2858 }
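
/* Editor's illustration -- not part of the original infrun.c.  A
   boolean model of the decision above: the syscall event is swallowed
   (the inferior keeps going) unless a catchpoint for this particular
   syscall number exists and the resulting bpstat explains the stop.
   Parameter names are hypothetical.  */

#include <stdbool.h>

static bool
model_syscall_event_keeps_going (bool catchpoints_enabled,
                                 bool this_syscall_caught,
                                 bool bpstat_explains_stop)
{
  if (catchpoints_enabled && this_syscall_caught && bpstat_explains_stop)
    return false;   /* catchpoint hit: stop and report */

  return true;      /* no catchpoint triggered: resume silently */
}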
2859
2860 /* Given an execution control state that has been freshly filled in
2861 by an event from the inferior, figure out what it means and take
2862 appropriate action. */
2863
2864 static void
2865 handle_inferior_event (struct execution_control_state *ecs)
2866 {
2867 struct frame_info *frame;
2868 struct gdbarch *gdbarch;
2869 int sw_single_step_trap_p = 0;
2870 int stopped_by_watchpoint;
2871 int stepped_after_stopped_by_watchpoint = 0;
2872 struct symtab_and_line stop_pc_sal;
2873 enum stop_kind stop_soon;
2874
2875 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2876 {
2877 /* We had an event in the inferior, but we are not interested in
2878 handling it at this level. The lower layers have already
2879 done what needs to be done, if anything.
2880
2881 One of the possible circumstances for this is when the
2882 inferior produces output for the console. The inferior has
2883 not stopped, and we are ignoring the event. Another possible
2884 circumstance is any event which the lower level knows will be
2885 reported multiple times without an intervening resume. */
2886 if (debug_infrun)
2887 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2888 prepare_to_wait (ecs);
2889 return;
2890 }
2891
2892 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2893 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2894 {
2895 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2896 gdb_assert (inf);
2897 stop_soon = inf->stop_soon;
2898 }
2899 else
2900 stop_soon = NO_STOP_QUIETLY;
2901
2902 /* Cache the last pid/waitstatus. */
2903 target_last_wait_ptid = ecs->ptid;
2904 target_last_waitstatus = ecs->ws;
2905
2906 /* Always clear state belonging to the previous time we stopped. */
2907 stop_stack_dummy = 0;
2908
2909 /* If it's a new process, add it to the thread database. */
2910
2911 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2912 && !ptid_equal (ecs->ptid, minus_one_ptid)
2913 && !in_thread_list (ecs->ptid));
2914
2915 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2916 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2917 add_thread (ecs->ptid);
2918
2919 ecs->event_thread = find_thread_ptid (ecs->ptid);
2920
2921 /* Dependent on valid ECS->EVENT_THREAD. */
2922 adjust_pc_after_break (ecs);
2923
2924 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2925 reinit_frame_cache ();
2926
2927 breakpoint_retire_moribund ();
2928
2929 /* First, distinguish signals caused by the debugger from signals
2930 that have to do with the program's own actions. Note that
2931 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2932 on the operating system version. Here we detect when a SIGILL or
2933 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2934 something similar for SIGSEGV, since a SIGSEGV will be generated
2935 when we're trying to execute a breakpoint instruction on a
2936 non-executable stack. This happens for call dummy breakpoints
2937 for architectures like SPARC that place call dummies on the
2938 stack. */
2939 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2940 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2941 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2942 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2943 {
2944 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2945
2946 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2947 regcache_read_pc (regcache)))
2948 {
2949 if (debug_infrun)
2950 fprintf_unfiltered (gdb_stdlog,
2951 "infrun: Treating signal as SIGTRAP\n");
2952 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2953 }
2954 }
2955
2956 /* Mark the non-executing threads accordingly. In all-stop, all
2957 threads of all processes are stopped when we get any event
2958 reported. In non-stop mode, only the event thread stops. If
2959 we're handling a process exit in non-stop mode, there's nothing
2960 to do, as threads of the dead process are gone, and threads of
2961 any other process were left running. */
2962 if (!non_stop)
2963 set_executing (minus_one_ptid, 0);
2964 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2965 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2966 set_executing (inferior_ptid, 0);
2967
2968 switch (infwait_state)
2969 {
2970 case infwait_thread_hop_state:
2971 if (debug_infrun)
2972 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2973 break;
2974
2975 case infwait_normal_state:
2976 if (debug_infrun)
2977 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2978 break;
2979
2980 case infwait_step_watch_state:
2981 if (debug_infrun)
2982 fprintf_unfiltered (gdb_stdlog,
2983 "infrun: infwait_step_watch_state\n");
2984
2985 stepped_after_stopped_by_watchpoint = 1;
2986 break;
2987
2988 case infwait_nonstep_watch_state:
2989 if (debug_infrun)
2990 fprintf_unfiltered (gdb_stdlog,
2991 "infrun: infwait_nonstep_watch_state\n");
2992 insert_breakpoints ();
2993
2994 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2995 handle things like signals arriving and other things happening
2996 in combination correctly? */
2997 stepped_after_stopped_by_watchpoint = 1;
2998 break;
2999
3000 default:
3001 internal_error (__FILE__, __LINE__, _("bad switch"));
3002 }
3003
3004 infwait_state = infwait_normal_state;
3005 waiton_ptid = pid_to_ptid (-1);
3006
3007 switch (ecs->ws.kind)
3008 {
3009 case TARGET_WAITKIND_LOADED:
3010 if (debug_infrun)
3011 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3012 /* Ignore gracefully during startup of the inferior, as it might
3013 be the shell which has just loaded some objects; otherwise
3014 add the symbols for the newly loaded objects. Also ignore at
3015 the beginning of an attach or remote session; we will query
3016 the full list of libraries once the connection is
3017 established. */
3018 if (stop_soon == NO_STOP_QUIETLY)
3019 {
3020 /* Check for any newly added shared libraries if we're
3021 supposed to be adding them automatically. Switch
3022 terminal for any messages produced by
3023 breakpoint_re_set. */
3024 target_terminal_ours_for_output ();
3025 /* NOTE: cagney/2003-11-25: Make certain that the target
3026 stack's section table is kept up-to-date. Architectures,
3027 (e.g., PPC64), use the section table to perform
3028 operations such as address => section name and hence
3029 require the table to contain all sections (including
3030 those found in shared libraries). */
3031 #ifdef SOLIB_ADD
3032 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3033 #else
3034 solib_add (NULL, 0, &current_target, auto_solib_add);
3035 #endif
3036 target_terminal_inferior ();
3037
3038 /* If requested, stop when the dynamic linker notifies
3039 gdb of events. This allows the user to get control
3040 and place breakpoints in initializer routines for
3041 dynamically loaded objects (among other things). */
3042 if (stop_on_solib_events)
3043 {
3044 /* Make sure we print "Stopped due to solib-event" in
3045 normal_stop. */
3046 stop_print_frame = 1;
3047
3048 stop_stepping (ecs);
3049 return;
3050 }
3051
3052 /* NOTE drow/2007-05-11: This might be a good place to check
3053 for "catch load". */
3054 }
3055
3056 /* If we are skipping through a shell, or through shared library
3057 loading that we aren't interested in, resume the program. If
3058 we're running the program normally, also resume. But stop if
3059 we're attaching or setting up a remote connection. */
3060 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3061 {
3062 /* Loading of shared libraries might have changed breakpoint
3063 addresses. Make sure new breakpoints are inserted. */
3064 if (stop_soon == NO_STOP_QUIETLY
3065 && !breakpoints_always_inserted_mode ())
3066 insert_breakpoints ();
3067 resume (0, TARGET_SIGNAL_0);
3068 prepare_to_wait (ecs);
3069 return;
3070 }
3071
3072 break;
3073
3074 case TARGET_WAITKIND_SPURIOUS:
3075 if (debug_infrun)
3076 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3077 resume (0, TARGET_SIGNAL_0);
3078 prepare_to_wait (ecs);
3079 return;
3080
3081 case TARGET_WAITKIND_EXITED:
3082 if (debug_infrun)
3083 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3084 inferior_ptid = ecs->ptid;
3085 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3086 set_current_program_space (current_inferior ()->pspace);
3087 handle_vfork_child_exec_or_exit (0);
3088 target_terminal_ours (); /* Must do this before mourn anyway */
3089 print_stop_reason (EXITED, ecs->ws.value.integer);
3090
3091 /* Record the exit code in the convenience variable $_exitcode, so
3092 that the user can inspect this again later. */
3093 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3094 (LONGEST) ecs->ws.value.integer);
3095 gdb_flush (gdb_stdout);
3096 target_mourn_inferior ();
3097 singlestep_breakpoints_inserted_p = 0;
3098 stop_print_frame = 0;
3099 stop_stepping (ecs);
3100 return;
3101
3102 case TARGET_WAITKIND_SIGNALLED:
3103 if (debug_infrun)
3104 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3105 inferior_ptid = ecs->ptid;
3106 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3107 set_current_program_space (current_inferior ()->pspace);
3108 handle_vfork_child_exec_or_exit (0);
3109 stop_print_frame = 0;
3110 target_terminal_ours (); /* Must do this before mourn anyway */
3111
3112 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3113 reach here unless the inferior is dead. However, for years
3114 target_kill() was called here, which hints that fatal signals aren't
3115 really fatal on some systems. If that's true, then some changes
3116 may be needed. */
3117 target_mourn_inferior ();
3118
3119 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3120 singlestep_breakpoints_inserted_p = 0;
3121 stop_stepping (ecs);
3122 return;
3123
3124 /* The following are the only cases in which we keep going;
3125 the above cases end in a continue or goto. */
3126 case TARGET_WAITKIND_FORKED:
3127 case TARGET_WAITKIND_VFORKED:
3128 if (debug_infrun)
3129 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3130
3131 if (!ptid_equal (ecs->ptid, inferior_ptid))
3132 {
3133 context_switch (ecs->ptid);
3134 reinit_frame_cache ();
3135 }
3136
3137 /* Immediately detach breakpoints from the child before there's
3138 any chance of letting the user delete breakpoints from the
3139 breakpoint lists. If we don't do this early, it's easy to
3140 leave leftover traps in the child, viz.: "break foo; catch
3141 fork; c; <fork>; del; c; <child calls foo>". We only follow
3142 the fork on the last `continue', and by that time the
3143 breakpoint at "foo" is long gone from the breakpoint table.
3144 If we vforked, then we don't need to unpatch here, since both
3145 parent and child are sharing the same memory pages; we'll
3146 need to unpatch at follow/detach time instead to be certain
3147 that new breakpoints added between catchpoint hit time and
3148 vfork follow are detached. */
3149 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3150 {
3151 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3152
3153 /* This won't actually modify the breakpoint list, but will
3154 physically remove the breakpoints from the child. */
3155 detach_breakpoints (child_pid);
3156 }
3157
3158 /* In case the event is caught by a catchpoint, remember that
3159 the event is to be followed at the next resume of the thread,
3160 and not immediately. */
3161 ecs->event_thread->pending_follow = ecs->ws;
3162
3163 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3164
3165 ecs->event_thread->stop_bpstat
3166 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3167 stop_pc, ecs->ptid);
3168
3169 /* Note that we're interested in knowing whether the bpstat actually
3170 causes a stop, not just whether it may explain the signal.
3171 Software watchpoints, for example, always appear in the
3172 bpstat. */
3173 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3174
3175 /* If no catchpoint triggered for this, then keep going. */
3176 if (ecs->random_signal)
3177 {
3178 ptid_t parent;
3179 ptid_t child;
3180 int should_resume;
3181 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3182
3183 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3184
3185 should_resume = follow_fork ();
3186
3187 parent = ecs->ptid;
3188 child = ecs->ws.value.related_pid;
3189
3190 /* In non-stop mode, also resume the other branch. */
3191 if (non_stop && !detach_fork)
3192 {
3193 if (follow_child)
3194 switch_to_thread (parent);
3195 else
3196 switch_to_thread (child);
3197
3198 ecs->event_thread = inferior_thread ();
3199 ecs->ptid = inferior_ptid;
3200 keep_going (ecs);
3201 }
3202
3203 if (follow_child)
3204 switch_to_thread (child);
3205 else
3206 switch_to_thread (parent);
3207
3208 ecs->event_thread = inferior_thread ();
3209 ecs->ptid = inferior_ptid;
3210
3211 if (should_resume)
3212 keep_going (ecs);
3213 else
3214 stop_stepping (ecs);
3215 return;
3216 }
3217 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3218 goto process_event_stop_test;
3219
3220 case TARGET_WAITKIND_VFORK_DONE:
3221 /* Done with the shared memory region. Re-insert breakpoints in
3222 the parent, and keep going. */
3223
3224 if (debug_infrun)
3225 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3226
3227 if (!ptid_equal (ecs->ptid, inferior_ptid))
3228 context_switch (ecs->ptid);
3229
3230 current_inferior ()->waiting_for_vfork_done = 0;
3231 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3232 /* This also takes care of reinserting breakpoints in the
3233 previously locked inferior. */
3234 keep_going (ecs);
3235 return;
3236
3237 case TARGET_WAITKIND_EXECD:
3238 if (debug_infrun)
3239 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3240
3241 if (!ptid_equal (ecs->ptid, inferior_ptid))
3242 {
3243 context_switch (ecs->ptid);
3244 reinit_frame_cache ();
3245 }
3246
3247 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3248
3249 /* Do whatever is necessary to the parent branch of the vfork. */
3250 handle_vfork_child_exec_or_exit (1);
3251
3252 /* This causes the eventpoints and symbol table to be reset.
3253 Must do this now, before trying to determine whether to
3254 stop. */
3255 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3256
3257 ecs->event_thread->stop_bpstat
3258 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3259 stop_pc, ecs->ptid);
3260 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3261
3262 /* Note that this may be referenced from inside
3263 bpstat_stop_status above, through inferior_has_execd. */
3264 xfree (ecs->ws.value.execd_pathname);
3265 ecs->ws.value.execd_pathname = NULL;
3266
3267 /* If no catchpoint triggered for this, then keep going. */
3268 if (ecs->random_signal)
3269 {
3270 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3271 keep_going (ecs);
3272 return;
3273 }
3274 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3275 goto process_event_stop_test;
3276
3277 /* Be careful not to try to gather much state about a thread
3278 that's in a syscall. It's frequently a losing proposition. */
3279 case TARGET_WAITKIND_SYSCALL_ENTRY:
3280 if (debug_infrun)
3281 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3282 /* Getting the current syscall number */
3283 if (handle_syscall_event (ecs) != 0)
3284 return;
3285 goto process_event_stop_test;
3286
3287 /* Before examining the threads further, step this thread to
3288 get it entirely out of the syscall. (We get notice of the
3289 event when the thread is just on the verge of exiting a
3290 syscall. Stepping one instruction seems to get it back
3291 into user code.) */
3292 case TARGET_WAITKIND_SYSCALL_RETURN:
3293 if (debug_infrun)
3294 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3295 if (handle_syscall_event (ecs) != 0)
3296 return;
3297 goto process_event_stop_test;
3298
3299 case TARGET_WAITKIND_STOPPED:
3300 if (debug_infrun)
3301 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3302 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3303 break;
3304
3305 case TARGET_WAITKIND_NO_HISTORY:
3306 /* Reverse execution: target ran out of history info. */
3307 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3308 print_stop_reason (NO_HISTORY, 0);
3309 stop_stepping (ecs);
3310 return;
3311 }
3312
3313 if (ecs->new_thread_event)
3314 {
3315 if (non_stop)
3316 /* Non-stop assumes that the target handles adding new threads
3317 to the thread list. */
3318 internal_error (__FILE__, __LINE__, "\
3319 targets should add new threads to the thread list themselves in non-stop mode.");
3320
3321 /* We may want to consider not doing a resume here in order to
3322 give the user a chance to play with the new thread. It might
3323 be good to make that a user-settable option. */
3324
3325 /* At this point, all threads are stopped (happens automatically
3326 in either the OS or the native code). Therefore we need to
3327 continue all threads in order to make progress. */
3328
3329 if (!ptid_equal (ecs->ptid, inferior_ptid))
3330 context_switch (ecs->ptid);
3331 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3332 prepare_to_wait (ecs);
3333 return;
3334 }
3335
3336 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3337 {
3338 /* Do we need to clean up the state of a thread that has
3339 completed a displaced single-step? (Doing so usually affects
3340 the PC, so do it here, before we set stop_pc.) */
3341 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3342
3343 /* If we either finished a single-step or hit a breakpoint, but
3344 the user wanted this thread to be stopped, pretend we got a
3345 SIG0 (generic unsignaled stop). */
3346
3347 if (ecs->event_thread->stop_requested
3348 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3349 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3350 }
3351
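  /* Record the PC at which the event thread stopped; most of the
     decisions below are keyed off this value.  */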
3352 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3353
3354 if (debug_infrun)
3355 {
3356 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3357 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3358 struct cleanup *old_chain = save_inferior_ptid ();
3359
3360 inferior_ptid = ecs->ptid;
3361
3362 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3363 paddress (gdbarch, stop_pc));
3364 if (target_stopped_by_watchpoint ())
3365 {
3366 CORE_ADDR addr;
3367 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3368
3369 if (target_stopped_data_address (&current_target, &addr))
3370 fprintf_unfiltered (gdb_stdlog,
3371 "infrun: stopped data address = %s\n",
3372 paddress (gdbarch, addr));
3373 else
3374 fprintf_unfiltered (gdb_stdlog,
3375 "infrun: (no data address available)\n");
3376 }
3377
3378 do_cleanups (old_chain);
3379 }
3380
3381 if (stepping_past_singlestep_breakpoint)
3382 {
3383 gdb_assert (singlestep_breakpoints_inserted_p);
3384 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3385 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3386
3387 stepping_past_singlestep_breakpoint = 0;
3388
3389 /* We've either finished single-stepping past the single-step
3390 breakpoint, or stopped for some other reason. It would be nice if
3391 we could tell, but we can't reliably. */
3392 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3393 {
3394 if (debug_infrun)
3395 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3396 /* Pull the single step breakpoints out of the target. */
3397 remove_single_step_breakpoints ();
3398 singlestep_breakpoints_inserted_p = 0;
3399
3400 ecs->random_signal = 0;
3401 ecs->event_thread->trap_expected = 0;
3402
3403 context_switch (saved_singlestep_ptid);
3404 if (deprecated_context_hook)
3405 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3406
3407 resume (1, TARGET_SIGNAL_0);
3408 prepare_to_wait (ecs);
3409 return;
3410 }
3411 }
3412
3413 if (!ptid_equal (deferred_step_ptid, null_ptid))
3414 {
3415 /* In non-stop mode, there's never a deferred_step_ptid set. */
3416 gdb_assert (!non_stop);
3417
3418 /* If we stopped for some other reason than single-stepping, ignore
3419 the fact that we were supposed to switch back. */
3420 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3421 {
3422 if (debug_infrun)
3423 fprintf_unfiltered (gdb_stdlog,
3424 "infrun: handling deferred step\n");
3425
3426 /* Pull the single step breakpoints out of the target. */
3427 if (singlestep_breakpoints_inserted_p)
3428 {
3429 remove_single_step_breakpoints ();
3430 singlestep_breakpoints_inserted_p = 0;
3431 }
3432
3433 /* Note: We do not call context_switch at this point, as the
3434 context is already set up for stepping the original thread. */
3435 switch_to_thread (deferred_step_ptid);
3436 deferred_step_ptid = null_ptid;
3437 /* Suppress spurious "Switching to ..." message. */
3438 previous_inferior_ptid = inferior_ptid;
3439
3440 resume (1, TARGET_SIGNAL_0);
3441 prepare_to_wait (ecs);
3442 return;
3443 }
3444
3445 deferred_step_ptid = null_ptid;
3446 }
3447
3448 /* See if a thread hit a thread-specific breakpoint that was meant for
3449 another thread. If so, then step that thread past the breakpoint,
3450 and continue it. */
3451
3452 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3453 {
3454 int thread_hop_needed = 0;
3455 struct address_space *aspace =
3456 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3457
3458 /* Check if a regular breakpoint has been hit before checking
3459 for a potential single step breakpoint. Otherwise, GDB will
3460 not see this breakpoint hit when stepping onto breakpoints. */
3461 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3462 {
3463 ecs->random_signal = 0;
3464 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3465 thread_hop_needed = 1;
3466 }
3467 else if (singlestep_breakpoints_inserted_p)
3468 {
3469 /* We have not context switched yet, so this should be true
3470 no matter which thread hit the singlestep breakpoint. */
3471 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3472 if (debug_infrun)
3473 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3474 "trap for %s\n",
3475 target_pid_to_str (ecs->ptid));
3476
3477 ecs->random_signal = 0;
3478 /* The call to in_thread_list is necessary because PTIDs sometimes
3479 change when we go from single-threaded to multi-threaded. If
3480 the singlestep_ptid is still in the list, assume that it is
3481 really different from ecs->ptid. */
3482 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3483 && in_thread_list (singlestep_ptid))
3484 {
3485 /* If the PC of the thread we were trying to single-step
3486 has changed, discard this event (which we were going
3487 to ignore anyway), and pretend we saw that thread
3488 trap. This prevents us continuously moving the
3489 single-step breakpoint forward, one instruction at a
3490 time. If the PC has changed, then the thread we were
3491 trying to single-step has trapped or been signalled,
3492 but the event has not been reported to GDB yet.
3493
3494 There might be some cases where this loses signal
3495 information, if a signal has arrived at exactly the
3496 same time that the PC changed, but this is the best
3497 we can do with the information available. Perhaps we
3498 should arrange to report all events for all threads
3499 when they stop, or to re-poll the remote looking for
3500 this particular thread (i.e. temporarily enable
3501 schedlock). */
3502
3503 CORE_ADDR new_singlestep_pc
3504 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3505
3506 if (new_singlestep_pc != singlestep_pc)
3507 {
3508 enum target_signal stop_signal;
3509
3510 if (debug_infrun)
3511 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3512 " but expected thread advanced also\n");
3513
3514 /* The current context still belongs to
3515 singlestep_ptid. Don't swap here, since that's
3516 the context we want to use. Just fudge our
3517 state and continue. */
3518 stop_signal = ecs->event_thread->stop_signal;
3519 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3520 ecs->ptid = singlestep_ptid;
3521 ecs->event_thread = find_thread_ptid (ecs->ptid);
3522 ecs->event_thread->stop_signal = stop_signal;
3523 stop_pc = new_singlestep_pc;
3524 }
3525 else
3526 {
3527 if (debug_infrun)
3528 fprintf_unfiltered (gdb_stdlog,
3529 "infrun: unexpected thread\n");
3530
3531 thread_hop_needed = 1;
3532 stepping_past_singlestep_breakpoint = 1;
3533 saved_singlestep_ptid = singlestep_ptid;
3534 }
3535 }
3536 }
3537
3538 if (thread_hop_needed)
3539 {
3540 struct regcache *thread_regcache;
3541 int remove_status = 0;
3542
3543 if (debug_infrun)
3544 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3545
3546 /* Switch context before touching inferior memory, the
3547 previous thread may have exited. */
3548 if (!ptid_equal (inferior_ptid, ecs->ptid))
3549 context_switch (ecs->ptid);
3550
3551 /* Saw a breakpoint, but it was hit by the wrong thread.
3552 Just continue. */
3553
3554 if (singlestep_breakpoints_inserted_p)
3555 {
3556 /* Pull the single step breakpoints out of the target. */
3557 remove_single_step_breakpoints ();
3558 singlestep_breakpoints_inserted_p = 0;
3559 }
3560
3561 /* If the arch can displace step, don't remove the
3562 breakpoints. */
3563 thread_regcache = get_thread_regcache (ecs->ptid);
3564 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3565 remove_status = remove_breakpoints ();
3566
3567 /* Did we fail to remove breakpoints? If so, try
3568 to set the PC past the bp. (There's at least
3569 one situation in which we can fail to remove
3570 the bp's: On HP-UX's that use ttrace, we can't
3571 change the address space of a vforking child
3572 process until the child exits (well, okay, not
3573 then either :-) or execs.)  */
3574 if (remove_status != 0)
3575 error (_("Cannot step over breakpoint hit in wrong thread"));
3576 else
3577 { /* Single step */
3578 if (!non_stop)
3579 {
3580 /* Only need to require the next event from this
3581 thread in all-stop mode. */
3582 waiton_ptid = ecs->ptid;
3583 infwait_state = infwait_thread_hop_state;
3584 }
3585
3586 ecs->event_thread->stepping_over_breakpoint = 1;
3587 keep_going (ecs);
3588 return;
3589 }
3590 }
3591 else if (singlestep_breakpoints_inserted_p)
3592 {
3593 sw_single_step_trap_p = 1;
3594 ecs->random_signal = 0;
3595 }
3596 }
3597 else
3598 ecs->random_signal = 1;
3599
3600 /* See if something interesting happened to the non-current thread. If
3601 so, then switch to that thread. */
3602 if (!ptid_equal (ecs->ptid, inferior_ptid))
3603 {
3604 if (debug_infrun)
3605 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3606
3607 context_switch (ecs->ptid);
3608
3609 if (deprecated_context_hook)
3610 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3611 }
3612
3613 /* At this point, get hold of the now-current thread's frame. */
3614 frame = get_current_frame ();
3615 gdbarch = get_frame_arch (frame);
3616
3617 if (singlestep_breakpoints_inserted_p)
3618 {
3619 /* Pull the single step breakpoints out of the target. */
3620 remove_single_step_breakpoints ();
3621 singlestep_breakpoints_inserted_p = 0;
3622 }
3623
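  /* Work out whether a watchpoint triggered at this stop.  If we have
     just single-stepped the inferior past a triggered watchpoint, that
     trigger has already been handled, so don't report it again.  */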
3624 if (stepped_after_stopped_by_watchpoint)
3625 stopped_by_watchpoint = 0;
3626 else
3627 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3628
3629 /* If necessary, step over this watchpoint. We'll be back to display
3630 it in a moment. */
3631 if (stopped_by_watchpoint
3632 && (target_have_steppable_watchpoint
3633 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3634 {
3635 /* At this point, we are stopped at an instruction which has
3636 attempted to write to a piece of memory under control of
3637 a watchpoint. The instruction hasn't actually executed
3638 yet. If we were to evaluate the watchpoint expression
3639 now, we would get the old value, and therefore no change
3640 would seem to have occurred.
3641
3642 In order to make watchpoints work `right', we really need
3643 to complete the memory write, and then evaluate the
3644 watchpoint expression. We do this by single-stepping the
3645 target.
3646
3647 It may not be necessary to disable the watchpoint to step over
3648 it. For example, the PA can (with some kernel cooperation)
3649 single step over a watchpoint without disabling the watchpoint.
3650
3651 It is far more common to need to disable a watchpoint to step
3652 the inferior over it. If we have non-steppable watchpoints,
3653 we must disable the current watchpoint; it's simplest to
3654 disable all watchpoints and breakpoints. */
3655 int hw_step = 1;
3656
3657 if (!target_have_steppable_watchpoint)
3658 remove_breakpoints ();
3659 /* Single step */
3660 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3661 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3662 waiton_ptid = ecs->ptid;
3663 if (target_have_steppable_watchpoint)
3664 infwait_state = infwait_step_watch_state;
3665 else
3666 infwait_state = infwait_nonstep_watch_state;
3667 prepare_to_wait (ecs);
3668 return;
3669 }
3670
3671 ecs->stop_func_start = 0;
3672 ecs->stop_func_end = 0;
3673 ecs->stop_func_name = 0;
3674 /* Don't care about return value; stop_func_start and stop_func_name
3675 will both be 0 if it doesn't work. */
3676 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3677 &ecs->stop_func_start, &ecs->stop_func_end);
3678 ecs->stop_func_start
3679 += gdbarch_deprecated_function_start_offset (gdbarch);
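  /* Reset the per-event state before deciding how to handle this
     stop.  */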
3680 ecs->event_thread->stepping_over_breakpoint = 0;
3681 bpstat_clear (&ecs->event_thread->stop_bpstat);
3682 ecs->event_thread->stop_step = 0;
3683 stop_print_frame = 1;
3684 ecs->random_signal = 0;
3685 stopped_by_random_signal = 0;
3686
3687 /* Hide inlined functions starting here, unless we just performed stepi or
3688 nexti. After stepi and nexti, always show the innermost frame (not any
3689 inline function call sites). */
3690 if (ecs->event_thread->step_range_end != 1)
3691 skip_inline_frames (ecs->ptid);
3692
3693 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3694 && ecs->event_thread->trap_expected
3695 && gdbarch_single_step_through_delay_p (gdbarch)
3696 && currently_stepping (ecs->event_thread))
3697 {
3698 /* We're trying to step off a breakpoint. Turns out that we're
3699 also on an instruction that needs to be stepped multiple
3700 times before it has been fully executed. E.g., architectures
3701 with a delay slot. It needs to be stepped twice, once for
3702 the instruction and once for the delay slot. */
3703 int step_through_delay
3704 = gdbarch_single_step_through_delay (gdbarch, frame);
3705 if (debug_infrun && step_through_delay)
3706 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3707 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3708 {
3709 /* The user issued a continue when stopped at a breakpoint.
3710 Set up for another trap and get out of here. */
3711 ecs->event_thread->stepping_over_breakpoint = 1;
3712 keep_going (ecs);
3713 return;
3714 }
3715 else if (step_through_delay)
3716 {
3717 /* The user issued a step when stopped at a breakpoint.
3718 Maybe we should stop, maybe we should not - the delay
3719 slot *might* correspond to a line of source. In any
3720 case, don't decide that here, just set
3721 ecs->stepping_over_breakpoint, making sure we
3722 single-step again before breakpoints are re-inserted. */
3723 ecs->event_thread->stepping_over_breakpoint = 1;
3724 }
3725 }
3726
3727 /* Look at the cause of the stop, and decide what to do.
3728 The alternatives are:
3729 1) stop_stepping and return; to really stop and return to the debugger,
3730 2) keep_going and return to start up again
3731 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3732 3) set ecs->random_signal to 1, and the decision between 1 and 2
3733 will be made according to the signal handling tables. */
3734
3735 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3736 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3737 || stop_soon == STOP_QUIETLY_REMOTE)
3738 {
3739 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3740 {
3741 if (debug_infrun)
3742 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3743 stop_print_frame = 0;
3744 stop_stepping (ecs);
3745 return;
3746 }
3747
3748 /* This originates from start_remote(), start_inferior() and
3749 the shared library hook functions. */
3750 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3751 {
3752 if (debug_infrun)
3753 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3754 stop_stepping (ecs);
3755 return;
3756 }
3757
3758 /* This originates from attach_command(). We need to overwrite
3759 the stop_signal here, because some kernels don't ignore a
3760 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3761 See more comments in inferior.h. On the other hand, if we
3762 get a non-SIGSTOP, report it to the user - assume the backend
3763 will handle the SIGSTOP if it should show up later.
3764
3765 Also consider that the attach is complete when we see a
3766 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3767 target extended-remote report it instead of a SIGSTOP
3768 (e.g. gdbserver). We already rely on SIGTRAP being our
3769 signal, so this is no exception.
3770
3771 Also consider that the attach is complete when we see a
3772 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3773 the target to stop all threads of the inferior, in case the
3774 low level attach operation doesn't stop them implicitly. If
3775 they weren't stopped implicitly, then the stub will report a
3776 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3777 other than GDB's request. */
3778 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3779 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3780 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3781 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3782 {
3783 stop_stepping (ecs);
3784 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3785 return;
3786 }
3787
3788 /* See if there is a breakpoint at the current PC. */
3789 ecs->event_thread->stop_bpstat
3790 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3791 stop_pc, ecs->ptid);
3792
3793 /* The following is needed in case a breakpoint condition called a
3794 function. */
3795 stop_print_frame = 1;
3796
3797 /* This is where we handle "moribund" watchpoints. Unlike
3798 software breakpoints traps, hardware watchpoint traps are
3799 always distinguishable from random traps. If no high-level
3800 watchpoint is associated with the reported stop data address
3801 anymore, then the bpstat does not explain the signal ---
3802 simply make sure to ignore it if `stopped_by_watchpoint' is
3803 set. */
3804
3805 if (debug_infrun
3806 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3807 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3808 && stopped_by_watchpoint)
3809 fprintf_unfiltered (gdb_stdlog, "\
3810 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3811
3812 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3813 at one stage in the past included checks for an inferior
3814 function call's call dummy's return breakpoint. The original
3815 comment, that went with the test, read:
3816
3817 ``End of a stack dummy. Some systems (e.g. Sony news) give
3818 another signal besides SIGTRAP, so check here as well as
3819 above.''
3820
3821 If someone ever tries to get call dummies on a
3822 non-executable stack to work (where the target would stop
3823 with something like a SIGSEGV), then those tests might need
3824 to be re-instated. Given, however, that the tests were only
3825 enabled when momentary breakpoints were not being used, I
3826 suspect that it won't be the case.
3827
3828 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3829 be necessary for call dummies on a non-executable stack on
3830 SPARC. */
3831
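  /* A SIGTRAP is not "random" if a breakpoint explains it, a
     watchpoint triggered, we were expecting a trap (e.g. stepping over
     a breakpoint), or we are in the middle of a step with no
     step-resume breakpoint pending (the trap is the step finishing).  */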
3832 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3833 ecs->random_signal
3834 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3835 || stopped_by_watchpoint
3836 || ecs->event_thread->trap_expected
3837 || (ecs->event_thread->step_range_end
3838 && ecs->event_thread->step_resume_breakpoint == NULL));
3839 else
3840 {
3841 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3842 if (!ecs->random_signal)
3843 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3844 }
3845 }
3846
3847 /* When we reach this point, we've pretty much decided
3848 that the reason for stopping must've been a random
3849 (unexpected) signal. */
3850
3851 else
3852 ecs->random_signal = 1;
3853
3854 process_event_stop_test:
3855
3856 /* Re-fetch current thread's frame in case we did a
3857 "goto process_event_stop_test" above. */
3858 frame = get_current_frame ();
3859 gdbarch = get_frame_arch (frame);
3860
3861 /* For the program's own signals, act according to
3862 the signal handling tables. */
3863
3864 if (ecs->random_signal)
3865 {
3866 /* Signal not for debugging purposes. */
3867 int printed = 0;
3868 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3869
3870 if (debug_infrun)
3871 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3872 ecs->event_thread->stop_signal);
3873
3874 stopped_by_random_signal = 1;
3875
3876 if (signal_print[ecs->event_thread->stop_signal])
3877 {
3878 printed = 1;
3879 target_terminal_ours_for_output ();
3880 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3881 }
3882 /* Always stop on signals if we're either just gaining control
3883 of the program, or the user explicitly requested this thread
3884 to remain stopped. */
3885 if (stop_soon != NO_STOP_QUIETLY
3886 || ecs->event_thread->stop_requested
3887 || (!inf->detaching
3888 && signal_stop_state (ecs->event_thread->stop_signal)))
3889 {
3890 stop_stepping (ecs);
3891 return;
3892 }
3893 /* If not going to stop, give terminal back
3894 if we took it away. */
3895 else if (printed)
3896 target_terminal_inferior ();
3897
3898 /* Clear the signal if it should not be passed. */
3899 if (signal_program[ecs->event_thread->stop_signal] == 0)
3900 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3901
3902 if (ecs->event_thread->prev_pc == stop_pc
3903 && ecs->event_thread->trap_expected
3904 && ecs->event_thread->step_resume_breakpoint == NULL)
3905 {
3906 /* We were just starting a new sequence, attempting to
3907 single-step off of a breakpoint and expecting a SIGTRAP.
3908 Instead this signal arrives. This signal will take us out
3909 of the stepping range, so GDB needs to remember that, when
3910 the signal handler returns, it must resume stepping off that
3911 breakpoint. */
3912 /* To simplify things, "continue" is forced to use the same
3913 code paths as single-step - set a breakpoint at the
3914 signal return address and then, once hit, step off that
3915 breakpoint. */
3916 if (debug_infrun)
3917 fprintf_unfiltered (gdb_stdlog,
3918 "infrun: signal arrived while stepping over "
3919 "breakpoint\n");
3920
3921 insert_step_resume_breakpoint_at_frame (frame);
3922 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3923 keep_going (ecs);
3924 return;
3925 }
3926
3927 if (ecs->event_thread->step_range_end != 0
3928 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3929 && (ecs->event_thread->step_range_start <= stop_pc
3930 && stop_pc < ecs->event_thread->step_range_end)
3931 && frame_id_eq (get_stack_frame_id (frame),
3932 ecs->event_thread->step_stack_frame_id)
3933 && ecs->event_thread->step_resume_breakpoint == NULL)
3934 {
3935 /* The inferior is about to take a signal that will take it
3936 out of the single step range. Set a breakpoint at the
3937 current PC (which is presumably where the signal handler
3938 will eventually return) and then allow the inferior to
3939 run free.
3940
3941 Note that this is only needed for a signal delivered
3942 while in the single-step range. Nested signals aren't a
3943 problem as they eventually all return. */
3944 if (debug_infrun)
3945 fprintf_unfiltered (gdb_stdlog,
3946 "infrun: signal may take us out of "
3947 "single-step range\n");
3948
3949 insert_step_resume_breakpoint_at_frame (frame);
3950 keep_going (ecs);
3951 return;
3952 }
3953
3954 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3955 when either there's a nested signal, or when there's a
3956 pending signal enabled just as the signal handler returns
3957 (leaving the inferior at the step-resume-breakpoint without
3958 actually executing it). Either way continue until the
3959 breakpoint is really hit. */
3960 keep_going (ecs);
3961 return;
3962 }
3963
3964 /* Handle cases caused by hitting a breakpoint. */
3965 {
3966 CORE_ADDR jmp_buf_pc;
3967 struct bpstat_what what;
3968
3969 what = bpstat_what (ecs->event_thread->stop_bpstat);
3970
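    /* A stop at a call-dummy breakpoint means an inferior function
       call has just returned; record that in stop_stack_dummy so the
       stop is handled accordingly.  */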
3971 if (what.call_dummy)
3972 {
3973 stop_stack_dummy = 1;
3974 }
3975
3976 switch (what.main_action)
3977 {
3978 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3979 /* If we hit the breakpoint at longjmp while stepping, we
3980 install a momentary breakpoint at the target of the
3981 jmp_buf. */
3982
3983 if (debug_infrun)
3984 fprintf_unfiltered (gdb_stdlog,
3985 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3986
3987 ecs->event_thread->stepping_over_breakpoint = 1;
3988
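      /* If the architecture cannot extract the longjmp target from the
	 jmp_buf, we cannot place the momentary breakpoint; just keep
	 going instead.  */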
3989 if (!gdbarch_get_longjmp_target_p (gdbarch)
3990 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3991 {
3992 if (debug_infrun)
3993 fprintf_unfiltered (gdb_stdlog, "\
3994 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3995 keep_going (ecs);
3996 return;
3997 }
3998
3999 /* We're going to replace the current step-resume breakpoint
4000 with a longjmp-resume breakpoint. */
4001 delete_step_resume_breakpoint (ecs->event_thread);
4002
4003 /* Insert a breakpoint at resume address. */
4004 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4005
4006 keep_going (ecs);
4007 return;
4008
4009 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4010 if (debug_infrun)
4011 fprintf_unfiltered (gdb_stdlog,
4012 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4013
4014 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4015 delete_step_resume_breakpoint (ecs->event_thread);
4016
4017 ecs->event_thread->stop_step = 1;
4018 print_stop_reason (END_STEPPING_RANGE, 0);
4019 stop_stepping (ecs);
4020 return;
4021
4022 case BPSTAT_WHAT_SINGLE:
4023 if (debug_infrun)
4024 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4025 ecs->event_thread->stepping_over_breakpoint = 1;
4026 /* Still need to check other stuff, at least the case
4027 where we are stepping and step out of the stepping range. */
4028 break;
4029
4030 case BPSTAT_WHAT_STOP_NOISY:
4031 if (debug_infrun)
4032 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4033 stop_print_frame = 1;
4034
4035 /* We are about to nuke the step_resume_breakpoint via the
4036 cleanup chain, so no need to worry about it here. */
4037
4038 stop_stepping (ecs);
4039 return;
4040
4041 case BPSTAT_WHAT_STOP_SILENT:
4042 if (debug_infrun)
4043 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4044 stop_print_frame = 0;
4045
4046 /* We are about to nuke the step_resume_breakpoint via the
4047 cleanup chain, so no need to worry about it here. */
4048
4049 stop_stepping (ecs);
4050 return;
4051
4052 case BPSTAT_WHAT_STEP_RESUME:
4053 if (debug_infrun)
4054 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4055
4056 delete_step_resume_breakpoint (ecs->event_thread);
4057 if (ecs->event_thread->step_after_step_resume_breakpoint)
4058 {
4059 /* Back when the step-resume breakpoint was inserted, we
4060 were trying to single-step off a breakpoint. Go back
4061 to doing that. */
4062 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4063 ecs->event_thread->stepping_over_breakpoint = 1;
4064 keep_going (ecs);
4065 return;
4066 }
4067 if (stop_pc == ecs->stop_func_start
4068 && execution_direction == EXEC_REVERSE)
4069 {
4070 /* We are stepping over a function call in reverse, and
4071 just hit the step-resume breakpoint at the start
4072 address of the function. Go back to single-stepping,
4073 which should take us back to the function call. */
4074 ecs->event_thread->stepping_over_breakpoint = 1;
4075 keep_going (ecs);
4076 return;
4077 }
4078 break;
4079
4080 case BPSTAT_WHAT_CHECK_SHLIBS:
4081 {
4082 if (debug_infrun)
4083 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4084
4085 /* Check for any newly added shared libraries if we're
4086 supposed to be adding them automatically. Switch
4087 terminal for any messages produced by
4088 breakpoint_re_set. */
4089 target_terminal_ours_for_output ();
4090 /* NOTE: cagney/2003-11-25: Make certain that the target
4091 stack's section table is kept up-to-date. Architectures,
4092 (e.g., PPC64), use the section table to perform
4093 operations such as address => section name and hence
4094 require the table to contain all sections (including
4095 those found in shared libraries). */
4096 #ifdef SOLIB_ADD
4097 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4098 #else
4099 solib_add (NULL, 0, &current_target, auto_solib_add);
4100 #endif
4101 target_terminal_inferior ();
4102
4103 /* If requested, stop when the dynamic linker notifies
4104 gdb of events. This allows the user to get control
4105 and place breakpoints in initializer routines for
4106 dynamically loaded objects (among other things). */
4107 if (stop_on_solib_events || stop_stack_dummy)
4108 {
4109 stop_stepping (ecs);
4110 return;
4111 }
4112 else
4113 {
4114 /* We want to step over this breakpoint, then keep going. */
4115 ecs->event_thread->stepping_over_breakpoint = 1;
4116 break;
4117 }
4118 }
4119 break;
4120
4121 case BPSTAT_WHAT_CHECK_JIT:
4122 if (debug_infrun)
4123 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4124
4125 /* Switch terminal for any messages produced by breakpoint_re_set. */
4126 target_terminal_ours_for_output ();
4127
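      /* Let the JIT interface process the event; this may register or
	 unregister JIT-generated code with GDB.  */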
4128 jit_event_handler (gdbarch);
4129
4130 target_terminal_inferior ();
4131
4132 /* We want to step over this breakpoint, then keep going. */
4133 ecs->event_thread->stepping_over_breakpoint = 1;
4134
4135 break;
4136
4137 case BPSTAT_WHAT_LAST:
4138 /* Not a real code, but listed here to shut up gcc -Wall. */
4139
4140 case BPSTAT_WHAT_KEEP_CHECKING:
4141 break;
4142 }
4143 }
4144
4145 /* We come here if we hit a breakpoint but should not
4146 stop for it. Possibly we also were stepping
4147 and should stop for that. So fall through and
4148 test for stepping. But, if not stepping,
4149 do not stop. */
4150
4151 /* In all-stop mode, if we're currently stepping but have stopped in
4152 some other thread, we need to switch back to the stepped thread. */
4153 if (!non_stop)
4154 {
4155 struct thread_info *tp;
4156 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4157 ecs->event_thread);
4158 if (tp)
4159 {
4160 /* However, if the current thread is blocked on some internal
4161 breakpoint, and we simply need to step over that breakpoint
4162 to get it going again, do that first. */
4163 if ((ecs->event_thread->trap_expected
4164 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4165 || ecs->event_thread->stepping_over_breakpoint)
4166 {
4167 keep_going (ecs);
4168 return;
4169 }
4170
4171 /* If the stepping thread exited, then don't try to switch
4172 back and resume it, which could fail in several different
4173 ways depending on the target. Instead, just keep going.
4174
4175 We can find a stepping dead thread in the thread list in
4176 two cases:
4177
4178 - The target supports thread exit events, and when the
4179 target tries to delete the thread from the thread list,
4180 inferior_ptid pointed at the exiting thread. In such
4181 case, calling delete_thread does not really remove the
4182 thread from the list; instead, the thread is left listed,
4183 with 'exited' state.
4184
4185 - The target's debug interface does not support thread
4186 exit events, and so we have no idea whatsoever if the
4187 previously stepping thread is still alive. For that
4188 reason, we need to synchronously query the target
4189 now. */
4190 if (is_exited (tp->ptid)
4191 || !target_thread_alive (tp->ptid))
4192 {
4193 if (debug_infrun)
4194 fprintf_unfiltered (gdb_stdlog, "\
4195 infrun: not switching back to stepped thread, it has vanished\n");
4196
4197 delete_thread (tp->ptid);
4198 keep_going (ecs);
4199 return;
4200 }
4201
4202 /* Otherwise, we no longer expect a trap in the current thread.
4203 Clear the trap_expected flag before switching back -- this is
4204 what keep_going would do as well, if we called it. */
4205 ecs->event_thread->trap_expected = 0;
4206
4207 if (debug_infrun)
4208 fprintf_unfiltered (gdb_stdlog,
4209 "infrun: switching back to stepped thread\n");
4210
4211 ecs->event_thread = tp;
4212 ecs->ptid = tp->ptid;
4213 context_switch (ecs->ptid);
4214 keep_going (ecs);
4215 return;
4216 }
4217 }
4218
4219 /* Are we stepping to get the inferior out of the dynamic linker's
4220 hook (and possibly the dld itself) after catching a shlib
4221 event? */
4222 if (ecs->event_thread->stepping_through_solib_after_catch)
4223 {
4224 #if defined(SOLIB_ADD)
4225 /* Have we reached our destination? If not, keep going. */
4226 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4227 {
4228 if (debug_infrun)
4229 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4230 ecs->event_thread->stepping_over_breakpoint = 1;
4231 keep_going (ecs);
4232 return;
4233 }
4234 #endif
4235 if (debug_infrun)
4236 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4237 /* Else, stop and report the catchpoint(s) whose triggering
4238 caused us to begin stepping. */
4239 ecs->event_thread->stepping_through_solib_after_catch = 0;
4240 bpstat_clear (&ecs->event_thread->stop_bpstat);
4241 ecs->event_thread->stop_bpstat
4242 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4243 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4244 stop_print_frame = 1;
4245 stop_stepping (ecs);
4246 return;
4247 }
4248
4249 if (ecs->event_thread->step_resume_breakpoint)
4250 {
4251 if (debug_infrun)
4252 fprintf_unfiltered (gdb_stdlog,
4253 "infrun: step-resume breakpoint is inserted\n");
4254
4255 /* Having a step-resume breakpoint overrides anything
4256 else having to do with stepping commands until
4257 that breakpoint is reached. */
4258 keep_going (ecs);
4259 return;
4260 }
4261
4262 if (ecs->event_thread->step_range_end == 0)
4263 {
4264 if (debug_infrun)
4265 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4266 /* Likewise if we aren't even stepping. */
4267 keep_going (ecs);
4268 return;
4269 }
4270
4271 /* Re-fetch current thread's frame in case the code above caused
4272 the frame cache to be re-initialized, making our FRAME variable
4273 a dangling pointer. */
4274 frame = get_current_frame ();
4275
4276 /* If stepping through a line, keep going if still within it.
4277
4278 Note that step_range_end is the address of the first instruction
4279 beyond the step range, and NOT the address of the last instruction
4280 within it!
4281
4282 Note also that during reverse execution, we may be stepping
4283 through a function epilogue and therefore must detect when
4284 the current-frame changes in the middle of a line. */
4285
4286 if (stop_pc >= ecs->event_thread->step_range_start
4287 && stop_pc < ecs->event_thread->step_range_end
4288 && (execution_direction != EXEC_REVERSE
4289 || frame_id_eq (get_frame_id (frame),
4290 ecs->event_thread->step_frame_id)))
4291 {
4292 if (debug_infrun)
4293 fprintf_unfiltered
4294 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4295 paddress (gdbarch, ecs->event_thread->step_range_start),
4296 paddress (gdbarch, ecs->event_thread->step_range_end));
4297
4298 /* When stepping backward, stop at beginning of line range
4299 (unless it's the function entry point, in which case
4300 keep going back to the call point). */
4301 if (stop_pc == ecs->event_thread->step_range_start
4302 && stop_pc != ecs->stop_func_start
4303 && execution_direction == EXEC_REVERSE)
4304 {
4305 ecs->event_thread->stop_step = 1;
4306 print_stop_reason (END_STEPPING_RANGE, 0);
4307 stop_stepping (ecs);
4308 }
4309 else
4310 keep_going (ecs);
4311
4312 return;
4313 }
4314
4315 /* We stepped out of the stepping range. */
4316
4317 /* If we are stepping at the source level and entered the runtime
4318 loader dynamic symbol resolution code...
4319
4320 EXEC_FORWARD: we keep on single stepping until we exit the run
4321 time loader code and reach the callee's address.
4322
4323 EXEC_REVERSE: we've already executed the callee (backward), and
4324 the runtime loader code is handled just like any other
4325 undebuggable function call. Now we need only keep stepping
4326 backward through the trampoline code, and that's handled further
4327 down, so there is nothing for us to do here. */
4328
4329 if (execution_direction != EXEC_REVERSE
4330 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4331 && in_solib_dynsym_resolve_code (stop_pc))
4332 {
4333 CORE_ADDR pc_after_resolver =
4334 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4335
4336 if (debug_infrun)
4337 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4338
4339 if (pc_after_resolver)
4340 {
4341 /* Set up a step-resume breakpoint at the address
4342 indicated by SKIP_SOLIB_RESOLVER. */
4343 struct symtab_and_line sr_sal;
4344 init_sal (&sr_sal);
4345 sr_sal.pc = pc_after_resolver;
4346 sr_sal.pspace = get_frame_program_space (frame);
4347
4348 insert_step_resume_breakpoint_at_sal (gdbarch,
4349 sr_sal, null_frame_id);
4350 }
4351
4352 keep_going (ecs);
4353 return;
4354 }
4355
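  /* Unless this was a stepi/nexti (for which step_range_end is 1, see
     below), handle ending up in a signal trampoline by continuing to
     single-step until we leave it.  */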
4356 if (ecs->event_thread->step_range_end != 1
4357 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4358 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4359 && get_frame_type (frame) == SIGTRAMP_FRAME)
4360 {
4361 if (debug_infrun)
4362 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4363 /* The inferior, while doing a "step" or "next", has ended up in
4364 a signal trampoline (either by a signal being delivered or by
4365 the signal handler returning). Just single-step until the
4366 inferior leaves the trampoline (either by calling the handler
4367 or returning). */
4368 keep_going (ecs);
4369 return;
4370 }
4371
4372 /* Check for subroutine calls. The check for the current frame
4373 equalling the step ID is not necessary - the check of the
4374 previous frame's ID is sufficient - but it is a common case and
4375 cheaper than checking the previous frame's ID.
4376
4377 NOTE: frame_id_eq will never report two invalid frame IDs as
4378 being equal, so to get into this block, both the current and
4379 previous frame must have valid frame IDs. */
4380 /* The outer_frame_id check is a heuristic to detect stepping
4381 through startup code. If we step over an instruction which
4382 sets the stack pointer from an invalid value to a valid value,
4383 we may detect that as a subroutine call from the mythical
4384 "outermost" function. This could be fixed by marking
4385 outermost frames as !stack_p,code_p,special_p. Then the
4386 initial outermost frame, before sp was valid, would
4387 have code_addr == &_start. See the comment in frame_id_eq
4388 for more. */
4389 if (!frame_id_eq (get_stack_frame_id (frame),
4390 ecs->event_thread->step_stack_frame_id)
4391 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4392 ecs->event_thread->step_stack_frame_id)
4393 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4394 outer_frame_id)
4395 || step_start_function != find_pc_function (stop_pc))))
4396 {
4397 CORE_ADDR real_stop_pc;
4398
4399 if (debug_infrun)
4400 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4401
4402 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4403 || ((ecs->event_thread->step_range_end == 1)
4404 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4405 ecs->stop_func_start)))
4406 {
4407 /* I presume that step_over_calls is only 0 when we're
4408 supposed to be stepping at the assembly language level
4409 ("stepi"). Just stop. */
4410 /* Also, maybe we just did a "nexti" inside a prologue, so we
4411 thought it was a subroutine call but it was not. Stop as
4412 well. FENN */
4413 /* And this works the same backward as forward. MVS */
4414 ecs->event_thread->stop_step = 1;
4415 print_stop_reason (END_STEPPING_RANGE, 0);
4416 stop_stepping (ecs);
4417 return;
4418 }
4419
4420 /* Reverse stepping through solib trampolines. */
4421
4422 if (execution_direction == EXEC_REVERSE
4423 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4424 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4425 || (ecs->stop_func_start == 0
4426 && in_solib_dynsym_resolve_code (stop_pc))))
4427 {
4428 /* Any solib trampoline code can be handled in reverse
4429 by simply continuing to single-step. We have already
4430 executed the solib function (backwards), and a few
4431 steps will take us back through the trampoline to the
4432 caller. */
4433 keep_going (ecs);
4434 return;
4435 }
4436
4437 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4438 {
4439 /* We're doing a "next".
4440
4441 Normal (forward) execution: set a breakpoint at the
4442 callee's return address (the address at which the caller
4443 will resume).
4444
4445 Reverse (backward) execution: set the step-resume
4446 breakpoint at the start of the function that we just
4447 stepped into (backwards), and continue to there. When we
4448 get there, we'll need to single-step back to the caller. */
4449
4450 if (execution_direction == EXEC_REVERSE)
4451 {
4452 struct symtab_and_line sr_sal;
4453
4454 /* Normal function call return (static or dynamic). */
4455 init_sal (&sr_sal);
4456 sr_sal.pc = ecs->stop_func_start;
4457 sr_sal.pspace = get_frame_program_space (frame);
4458 insert_step_resume_breakpoint_at_sal (gdbarch,
4459 sr_sal, null_frame_id);
4460 }
4461 else
4462 insert_step_resume_breakpoint_at_caller (frame);
4463
4464 keep_going (ecs);
4465 return;
4466 }
4467
4468 /* If we are in a function call trampoline (a stub between the
4469 calling routine and the real function), locate the real
4470 function. That's what tells us (a) whether we want to step
4471 into it at all, and (b) what prologue we want to run to the
4472 end of, if we do step into it. */
4473 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4474 if (real_stop_pc == 0)
4475 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4476 if (real_stop_pc != 0)
4477 ecs->stop_func_start = real_stop_pc;
4478
4479 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4480 {
4481 struct symtab_and_line sr_sal;
4482 init_sal (&sr_sal);
4483 sr_sal.pc = ecs->stop_func_start;
4484 sr_sal.pspace = get_frame_program_space (frame);
4485
4486 insert_step_resume_breakpoint_at_sal (gdbarch,
4487 sr_sal, null_frame_id);
4488 keep_going (ecs);
4489 return;
4490 }
4491
4492 /* If we have line number information for the function we are
4493 thinking of stepping into, step into it.
4494
4495 If there are several symtabs at that PC (e.g. with include
4496 files), we just want to know whether *any* of them have line
4497 numbers. find_pc_line handles this. */
4498 {
4499 struct symtab_and_line tmp_sal;
4500
4501 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4502 tmp_sal.pspace = get_frame_program_space (frame);
4503 if (tmp_sal.line != 0)
4504 {
4505 if (execution_direction == EXEC_REVERSE)
4506 handle_step_into_function_backward (gdbarch, ecs);
4507 else
4508 handle_step_into_function (gdbarch, ecs);
4509 return;
4510 }
4511 }
4512
4513 /* If we have no line number and the step-stop-if-no-debug is
4514 set, we stop the step so that the user has a chance to switch
4515 to assembly mode. */
4516 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4517 && step_stop_if_no_debug)
4518 {
4519 ecs->event_thread->stop_step = 1;
4520 print_stop_reason (END_STEPPING_RANGE, 0);
4521 stop_stepping (ecs);
4522 return;
4523 }
4524
4525 if (execution_direction == EXEC_REVERSE)
4526 {
4527 /* Set a breakpoint at callee's start address.
4528 From there we can step once and be back in the caller. */
4529 struct symtab_and_line sr_sal;
4530 init_sal (&sr_sal);
4531 sr_sal.pc = ecs->stop_func_start;
4532 sr_sal.pspace = get_frame_program_space (frame);
4533 insert_step_resume_breakpoint_at_sal (gdbarch,
4534 sr_sal, null_frame_id);
4535 }
4536 else
4537 /* Set a breakpoint at callee's return address (the address
4538 at which the caller will resume). */
4539 insert_step_resume_breakpoint_at_caller (frame);
4540
4541 keep_going (ecs);
4542 return;
4543 }
4544
4545 /* Reverse stepping through solib trampolines. */
4546
4547 if (execution_direction == EXEC_REVERSE
4548 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4549 {
4550 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4551 || (ecs->stop_func_start == 0
4552 && in_solib_dynsym_resolve_code (stop_pc)))
4553 {
4554 /* Any solib trampoline code can be handled in reverse
4555 by simply continuing to single-step. We have already
4556 executed the solib function (backwards), and a few
4557 steps will take us back through the trampoline to the
4558 caller. */
4559 keep_going (ecs);
4560 return;
4561 }
4562 else if (in_solib_dynsym_resolve_code (stop_pc))
4563 {
4564 /* Stepped backward into the solib dynsym resolver.
4565 Set a breakpoint at its start and continue, then
4566 one more step will take us out. */
4567 struct symtab_and_line sr_sal;
4568 init_sal (&sr_sal);
4569 sr_sal.pc = ecs->stop_func_start;
4570 sr_sal.pspace = get_frame_program_space (frame);
4571 insert_step_resume_breakpoint_at_sal (gdbarch,
4572 sr_sal, null_frame_id);
4573 keep_going (ecs);
4574 return;
4575 }
4576 }
4577
4578 /* If we're in the return path from a shared library trampoline,
4579 we want to proceed through the trampoline when stepping. */
4580 if (gdbarch_in_solib_return_trampoline (gdbarch,
4581 stop_pc, ecs->stop_func_name))
4582 {
4583 /* Determine where this trampoline returns. */
4584 CORE_ADDR real_stop_pc;
4585 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4586
4587 if (debug_infrun)
4588 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4589
4590 /* Only proceed through if we know where it's going. */
4591 if (real_stop_pc)
4592 {
4593 /* And put the step-breakpoint there and go until there. */
4594 struct symtab_and_line sr_sal;
4595
4596 init_sal (&sr_sal); /* initialize to zeroes */
4597 sr_sal.pc = real_stop_pc;
4598 sr_sal.section = find_pc_overlay (sr_sal.pc);
4599 sr_sal.pspace = get_frame_program_space (frame);
4600
4601 /* Do not specify what the fp should be when we stop since
4602 on some machines the prologue is where the new fp value
4603 is established. */
4604 insert_step_resume_breakpoint_at_sal (gdbarch,
4605 sr_sal, null_frame_id);
4606
4607 /* Restart without fiddling with the step ranges or
4608 other state. */
4609 keep_going (ecs);
4610 return;
4611 }
4612 }
4613
4614 stop_pc_sal = find_pc_line (stop_pc, 0);
4615
4616 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4617 the trampoline processing logic; however, there are some trampolines
4618 that have no names, so we should do trampoline handling first. */
4619 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4620 && ecs->stop_func_name == NULL
4621 && stop_pc_sal.line == 0)
4622 {
4623 if (debug_infrun)
4624 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4625
4626 /* The inferior just stepped into, or returned to, an
4627 undebuggable function (where there is no debugging information
4628 and no line number corresponding to the address where the
4629 inferior stopped). Since we want to skip this kind of code,
4630 we keep going until the inferior returns from this
4631 function - unless the user has asked us not to (via
4632 set step-mode) or we no longer know how to get back
4633 to the call site. */
4634 if (step_stop_if_no_debug
4635 || !frame_id_p (frame_unwind_caller_id (frame)))
4636 {
4637 /* If we have no line number and the step-stop-if-no-debug
4638 is set, we stop the step so that the user has a chance to
4639 switch to assembly mode. */
4640 ecs->event_thread->stop_step = 1;
4641 print_stop_reason (END_STEPPING_RANGE, 0);
4642 stop_stepping (ecs);
4643 return;
4644 }
4645 else
4646 {
4647 /* Set a breakpoint at callee's return address (the address
4648 at which the caller will resume). */
4649 insert_step_resume_breakpoint_at_caller (frame);
4650 keep_going (ecs);
4651 return;
4652 }
4653 }
4654
4655 if (ecs->event_thread->step_range_end == 1)
4656 {
4657 /* It is stepi or nexti. We always want to stop stepping after
4658 one instruction. */
4659 if (debug_infrun)
4660 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4661 ecs->event_thread->stop_step = 1;
4662 print_stop_reason (END_STEPPING_RANGE, 0);
4663 stop_stepping (ecs);
4664 return;
4665 }
4666
4667 if (stop_pc_sal.line == 0)
4668 {
4669 /* We have no line number information. That means to stop
4670 stepping (does this always happen right after one instruction,
4671 when we do "s" in a function with no line numbers,
4672 or can this happen as a result of a return or longjmp?). */
4673 if (debug_infrun)
4674 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4675 ecs->event_thread->stop_step = 1;
4676 print_stop_reason (END_STEPPING_RANGE, 0);
4677 stop_stepping (ecs);
4678 return;
4679 }
4680
4681 /* Look for "calls" to inlined functions, part one. If the inline
4682 frame machinery detected some skipped call sites, we have entered
4683 a new inline function. */
4684
4685 if (frame_id_eq (get_frame_id (get_current_frame ()),
4686 ecs->event_thread->step_frame_id)
4687 && inline_skipped_frames (ecs->ptid))
4688 {
4689 struct symtab_and_line call_sal;
4690
4691 if (debug_infrun)
4692 fprintf_unfiltered (gdb_stdlog,
4693 "infrun: stepped into inlined function\n");
4694
4695 find_frame_sal (get_current_frame (), &call_sal);
4696
4697 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4698 {
4699 /* For "step", we're going to stop. But if the call site
4700 for this inlined function is on the same source line as
4701 we were previously stepping, go down into the function
4702 first. Otherwise stop at the call site. */
4703
4704 if (call_sal.line == ecs->event_thread->current_line
4705 && call_sal.symtab == ecs->event_thread->current_symtab)
4706 step_into_inline_frame (ecs->ptid);
4707
4708 ecs->event_thread->stop_step = 1;
4709 print_stop_reason (END_STEPPING_RANGE, 0);
4710 stop_stepping (ecs);
4711 return;
4712 }
4713 else
4714 {
4715 /* For "next", we should stop at the call site if it is on a
4716 different source line. Otherwise continue through the
4717 inlined function. */
4718 if (call_sal.line == ecs->event_thread->current_line
4719 && call_sal.symtab == ecs->event_thread->current_symtab)
4720 keep_going (ecs);
4721 else
4722 {
4723 ecs->event_thread->stop_step = 1;
4724 print_stop_reason (END_STEPPING_RANGE, 0);
4725 stop_stepping (ecs);
4726 }
4727 return;
4728 }
4729 }
4730
4731 /* Look for "calls" to inlined functions, part two. If we are still
4732 in the same real function we were stepping through, but we have
4733 to go further up to find the exact frame ID, we are stepping
4734 through a more inlined call beyond its call site. */
4735
4736 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4737 && !frame_id_eq (get_frame_id (get_current_frame ()),
4738 ecs->event_thread->step_frame_id)
4739 && stepped_in_from (get_current_frame (),
4740 ecs->event_thread->step_frame_id))
4741 {
4742 if (debug_infrun)
4743 fprintf_unfiltered (gdb_stdlog,
4744 "infrun: stepping through inlined function\n");
4745
4746 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4747 keep_going (ecs);
4748 else
4749 {
4750 ecs->event_thread->stop_step = 1;
4751 print_stop_reason (END_STEPPING_RANGE, 0);
4752 stop_stepping (ecs);
4753 }
4754 return;
4755 }
4756
4757 if ((stop_pc == stop_pc_sal.pc)
4758 && (ecs->event_thread->current_line != stop_pc_sal.line
4759 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4760 {
4761 /* We are at the start of a different line. So stop. Note that
4762 we don't stop if we step into the middle of a different line.
4763 That is said to make things like for (;;) statements work
4764 better. */
4765 if (debug_infrun)
4766 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4767 ecs->event_thread->stop_step = 1;
4768 print_stop_reason (END_STEPPING_RANGE, 0);
4769 stop_stepping (ecs);
4770 return;
4771 }
4772
4773 /* We aren't done stepping.
4774
4775 Optimize by setting the stepping range to the line.
4776 (We might not be in the original line, but if we entered a
4777 new line in mid-statement, we continue stepping. This makes
4778 things like for(;;) statements work better.) */
4779
4780 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4781 ecs->event_thread->step_range_end = stop_pc_sal.end;
4782 set_step_info (frame, stop_pc_sal);
4783
4784 if (debug_infrun)
4785 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4786 keep_going (ecs);
4787 }
4788
4789 /* Is thread TP in the middle of single-stepping? */
4790
4791 static int
4792 currently_stepping (struct thread_info *tp)
4793 {
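  /* A thread is stepping if it has an active step range with no
     step-resume breakpoint pending, is expecting the trap from stepping
     over a breakpoint, is stepping through the dynamic linker after a
     shlib catchpoint, or if the bpstat machinery requests
     single-stepping.  */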
4794 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4795 || tp->trap_expected
4796 || tp->stepping_through_solib_after_catch
4797 || bpstat_should_step ());
4798 }
4799
4800 /* Returns true if any thread *but* the one passed in "data" is in the
4801 middle of stepping or of handling a "next". */
4802
4803 static int
4804 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4805 {
4806 if (tp == data)
4807 return 0;
4808
4809 return (tp->step_range_end
4810 || tp->trap_expected
4811 || tp->stepping_through_solib_after_catch);
4812 }
4813
4814 /* Inferior has stepped into a subroutine call with source code that
4815 we should not step over. Do step to the first line of code in
4816 it. */
4817
4818 static void
4819 handle_step_into_function (struct gdbarch *gdbarch,
4820 struct execution_control_state *ecs)
4821 {
4822 struct symtab *s;
4823 struct symtab_and_line stop_func_sal, sr_sal;
4824
4825 s = find_pc_symtab (stop_pc);
4826 if (s && s->language != language_asm)
4827 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4828 ecs->stop_func_start);
4829
4830 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4831 /* Use the step_resume_break to step until the end of the prologue,
4832 even if that involves jumps (as it seems to on the vax under
4833 4.2). */
4834 /* If the prologue ends in the middle of a source line, continue to
4835 the end of that source line (if it is still within the function).
4836 Otherwise, just go to end of prologue. */
4837 if (stop_func_sal.end
4838 && stop_func_sal.pc != ecs->stop_func_start
4839 && stop_func_sal.end < ecs->stop_func_end)
4840 ecs->stop_func_start = stop_func_sal.end;
4841
4842 /* Architectures which require breakpoint adjustment might not be able
4843 to place a breakpoint at the computed address. If so, the test
4844 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4845 ecs->stop_func_start to an address at which a breakpoint may be
4846 legitimately placed.
4847
4848 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4849 made, GDB will enter an infinite loop when stepping through
4850 optimized code consisting of VLIW instructions which contain
4851 subinstructions corresponding to different source lines. On
4852 FR-V, it's not permitted to place a breakpoint on any but the
4853 first subinstruction of a VLIW instruction. When a breakpoint is
4854 set, GDB will adjust the breakpoint address to the beginning of
4855 the VLIW instruction. Thus, we need to make the corresponding
4856 adjustment here when computing the stop address. */
4857
4858 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4859 {
4860 ecs->stop_func_start
4861 = gdbarch_adjust_breakpoint_address (gdbarch,
4862 ecs->stop_func_start);
4863 }
4864
4865 if (ecs->stop_func_start == stop_pc)
4866 {
4867 /* We are already there: stop now. */
4868 ecs->event_thread->stop_step = 1;
4869 print_stop_reason (END_STEPPING_RANGE, 0);
4870 stop_stepping (ecs);
4871 return;
4872 }
4873 else
4874 {
4875 /* Put the step-breakpoint there and go until there. */
4876 init_sal (&sr_sal); /* initialize to zeroes */
4877 sr_sal.pc = ecs->stop_func_start;
4878 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4879 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4880
4881 /* Do not specify what the fp should be when we stop since on
4882 some machines the prologue is where the new fp value is
4883 established. */
4884 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4885
4886 /* And make sure stepping stops right away then. */
4887 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4888 }
4889 keep_going (ecs);
4890 }
4891
4892 /* Inferior has stepped backward into a subroutine call with source
4893 code that we should not step over. Step to the beginning of the
4894 last line of code in it. */
4895
4896 static void
4897 handle_step_into_function_backward (struct gdbarch *gdbarch,
4898 struct execution_control_state *ecs)
4899 {
4900 struct symtab *s;
4901 struct symtab_and_line stop_func_sal, sr_sal;
4902
4903 s = find_pc_symtab (stop_pc);
4904 if (s && s->language != language_asm)
4905 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4906 ecs->stop_func_start);
4907
4908 stop_func_sal = find_pc_line (stop_pc, 0);
4909
4910 /* OK, we're just going to keep stepping here. */
4911 if (stop_func_sal.pc == stop_pc)
4912 {
4913 /* We're there already. Just stop stepping now. */
4914 ecs->event_thread->stop_step = 1;
4915 print_stop_reason (END_STEPPING_RANGE, 0);
4916 stop_stepping (ecs);
4917 }
4918 else
4919 {
4920 /* Else just reset the step range and keep going.
4921 No step-resume breakpoint, they don't work for
4922 epilogues, which can have multiple entry paths. */
4923 ecs->event_thread->step_range_start = stop_func_sal.pc;
4924 ecs->event_thread->step_range_end = stop_func_sal.end;
4925 keep_going (ecs);
4926 }
4927 return;
4928 }
4929
4930 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4931 This is used both to step over functions and to skip over code. */
4932
4933 static void
4934 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4935 struct symtab_and_line sr_sal,
4936 struct frame_id sr_id)
4937 {
4938 /* There should never be more than one step-resume or longjmp-resume
4939 breakpoint per thread, so we should never be setting a new
4940 step_resume_breakpoint when one is already active. */
4941 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4942
4943 if (debug_infrun)
4944 fprintf_unfiltered (gdb_stdlog,
4945 "infrun: inserting step-resume breakpoint at %s\n",
4946 paddress (gdbarch, sr_sal.pc));
4947
4948 inferior_thread ()->step_resume_breakpoint
4949 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4950 }
4951
4952 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4953 to skip a potential signal handler.
4954
4955 This is called with the interrupted function's frame. The signal
4956 handler, when it returns, will resume the interrupted function at
4957 RETURN_FRAME.pc. */
4958
4959 static void
4960 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4961 {
4962 struct symtab_and_line sr_sal;
4963 struct gdbarch *gdbarch;
4964
4965 gdb_assert (return_frame != NULL);
4966 init_sal (&sr_sal); /* initialize to zeros */
4967
4968 gdbarch = get_frame_arch (return_frame);
4969 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4970 sr_sal.section = find_pc_overlay (sr_sal.pc);
4971 sr_sal.pspace = get_frame_program_space (return_frame);
4972
4973 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4974 get_stack_frame_id (return_frame));
4975 }
4976
4977 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
4978 places the breakpoint at the previous frame's PC. This is used to
4979 skip a function after stepping into it (for "next" or if the called
4980 function has no debugging information).
4981
4982 The current function has almost always been reached by single
4983 stepping a call or return instruction. NEXT_FRAME belongs to the
4984 current function, and the breakpoint will be set at the caller's
4985 resume address.
4986
4987 This is a separate function rather than reusing
4988 insert_step_resume_breakpoint_at_frame in order to avoid
4989 get_prev_frame, which may stop prematurely (see the implementation
4990 of frame_unwind_caller_id for an example). */
4991
4992 static void
4993 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4994 {
4995 struct symtab_and_line sr_sal;
4996 struct gdbarch *gdbarch;
4997
4998 /* We shouldn't have gotten here if we don't know where the call site
4999 is. */
5000 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5001
5002 init_sal (&sr_sal); /* initialize to zeros */
5003
5004 gdbarch = frame_unwind_caller_arch (next_frame);
5005 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5006 frame_unwind_caller_pc (next_frame));
5007 sr_sal.section = find_pc_overlay (sr_sal.pc);
5008 sr_sal.pspace = frame_unwind_program_space (next_frame);
5009
5010 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5011 frame_unwind_caller_id (next_frame));
5012 }
5013
5014 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5015 new breakpoint at the target of a jmp_buf. The handling of
5016 longjmp-resume uses the same mechanisms used for handling
5017 "step-resume" breakpoints. */
5018
5019 static void
5020 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5021 {
5022 /* There should never be more than one step-resume or longjmp-resume
5023 breakpoint per thread, so we should never be setting a new
5024 longjmp_resume_breakpoint when one is already active. */
5025 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5026
5027 if (debug_infrun)
5028 fprintf_unfiltered (gdb_stdlog,
5029 "infrun: inserting longjmp-resume breakpoint at %s\n",
5030 paddress (gdbarch, pc));
5031
5032 inferior_thread ()->step_resume_breakpoint =
5033 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5034 }
5035
5036 static void
5037 stop_stepping (struct execution_control_state *ecs)
5038 {
5039 if (debug_infrun)
5040 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5041
5042 /* Let callers know we don't want to wait for the inferior anymore. */
5043 ecs->wait_some_more = 0;
5044 }
5045
5046 /* This function handles various cases where we need to continue
5047 waiting for the inferior. */
5048 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5049
5050 static void
5051 keep_going (struct execution_control_state *ecs)
5052 {
5053 /* Make sure normal_stop is called if we get a QUIT handled before
5054 reaching resume. */
5055 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5056
5057 /* Save the pc before execution, to compare with pc after stop. */
5058 ecs->event_thread->prev_pc
5059 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5060
5061 /* Reaching this point means we should keep running the inferior
5062 and not return to the debugger. */
5063
5064 if (ecs->event_thread->trap_expected
5065 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5066 {
5067 /* We took a signal (which we are supposed to pass through to
5068 the inferior, else we'd not get here) and we haven't yet
5069 gotten our trap. Simply continue. */
5070
5071 discard_cleanups (old_cleanups);
5072 resume (currently_stepping (ecs->event_thread),
5073 ecs->event_thread->stop_signal);
5074 }
5075 else
5076 {
5077 /* Either the trap was not expected, but we are continuing
5078 anyway (the user asked that this signal be passed to the
5079 child)
5080 -- or --
5081 The signal was SIGTRAP, e.g. it was our signal, but we
5082 decided we should resume from it.
5083
5084 We're going to run this baby now!
5085
5086 Note that insert_breakpoints won't try to re-insert
5087 already inserted breakpoints. Therefore, we don't
5088 care if breakpoints were already inserted, or not. */
5089
5090 if (ecs->event_thread->stepping_over_breakpoint)
5091 {
5092 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5093 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5094 /* Since we can't do a displaced step, we have to remove
5095 the breakpoint while we step it. To keep things
5096 simple, we remove them all. */
5097 remove_breakpoints ();
5098 }
5099 else
5100 {
5101 struct gdb_exception e;
5102 /* Stop stepping when inserting breakpoints
5103 has failed. */
5104 TRY_CATCH (e, RETURN_MASK_ERROR)
5105 {
5106 insert_breakpoints ();
5107 }
5108 if (e.reason < 0)
5109 {
5110 exception_print (gdb_stderr, e);
5111 stop_stepping (ecs);
5112 return;
5113 }
5114 }
5115
5116 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5117
5118 /* Do not deliver TARGET_SIGNAL_TRAP (except when the user explicitly
5119 specifies that such a signal should be delivered to the
5120 target program).
5121
5122 Typically, this would occur when a user is debugging a
5123 target monitor on a simulator: the target monitor sets a
5124 breakpoint; the simulator encounters this breakpoint and
5125 halts the simulation, handing control to GDB; GDB, noting
5126 that the breakpoint isn't valid, returns control back to the
5127 simulator; the simulator then delivers the hardware
5128 equivalent of a TARGET_SIGNAL_TRAP to the program being debugged. */
5129
5130 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5131 && !signal_program[ecs->event_thread->stop_signal])
5132 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5133
5134 discard_cleanups (old_cleanups);
5135 resume (currently_stepping (ecs->event_thread),
5136 ecs->event_thread->stop_signal);
5137 }
5138
5139 prepare_to_wait (ecs);
5140 }
5141
5142 /* This function normally comes after a resume, before
5143 handle_inferior_event exits. It takes care of any last bits of
5144 housekeeping, and sets the all-important wait_some_more flag. */
5145
5146 static void
5147 prepare_to_wait (struct execution_control_state *ecs)
5148 {
5149 if (debug_infrun)
5150 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5151
5152 /* This is the old end of the while loop. Let everybody know we
5153 want to wait for the inferior some more and get called again
5154 soon. */
5155 ecs->wait_some_more = 1;
5156 }
5157
5158 /* Print why the inferior has stopped. We always print something when
5159 the inferior exits, or receives a signal. The rest of the cases are
5160 dealt with later on in normal_stop() and print_it_typical(). Ideally
5161 there should be a call to this function from handle_inferior_event()
5162 each time stop_stepping() is called.  */
5163 static void
5164 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5165 {
5166 switch (stop_reason)
5167 {
5168 case END_STEPPING_RANGE:
5169 /* We are done with a step/next/si/ni command. */
5170 /* For now print nothing. */
5171 /* Print a message only if not in the middle of doing a "step n"
5172 operation for n > 1 */
5173 if (!inferior_thread ()->step_multi
5174 || !inferior_thread ()->stop_step)
5175 if (ui_out_is_mi_like_p (uiout))
5176 ui_out_field_string
5177 (uiout, "reason",
5178 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5179 break;
5180 case SIGNAL_EXITED:
5181 /* The inferior was terminated by a signal. */
5182 annotate_signalled ();
5183 if (ui_out_is_mi_like_p (uiout))
5184 ui_out_field_string
5185 (uiout, "reason",
5186 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5187 ui_out_text (uiout, "\nProgram terminated with signal ");
5188 annotate_signal_name ();
5189 ui_out_field_string (uiout, "signal-name",
5190 target_signal_to_name (stop_info));
5191 annotate_signal_name_end ();
5192 ui_out_text (uiout, ", ");
5193 annotate_signal_string ();
5194 ui_out_field_string (uiout, "signal-meaning",
5195 target_signal_to_string (stop_info));
5196 annotate_signal_string_end ();
5197 ui_out_text (uiout, ".\n");
5198 ui_out_text (uiout, "The program no longer exists.\n");
5199 break;
5200 case EXITED:
5201 /* The inferior program is finished. */
5202 annotate_exited (stop_info);
5203 if (stop_info)
5204 {
5205 if (ui_out_is_mi_like_p (uiout))
5206 ui_out_field_string (uiout, "reason",
5207 async_reason_lookup (EXEC_ASYNC_EXITED));
5208 ui_out_text (uiout, "\nProgram exited with code ");
5209 ui_out_field_fmt (uiout, "exit-code", "0%o",
5210 (unsigned int) stop_info);
5211 ui_out_text (uiout, ".\n");
5212 }
5213 else
5214 {
5215 if (ui_out_is_mi_like_p (uiout))
5216 ui_out_field_string
5217 (uiout, "reason",
5218 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5219 ui_out_text (uiout, "\nProgram exited normally.\n");
5220 }
5221 /* Support the --return-child-result option. */
5222 return_child_result_value = stop_info;
5223 break;
5224 case SIGNAL_RECEIVED:
5225 /* Signal received. The signal table tells us to print about
5226 it. */
5227 annotate_signal ();
5228
5229 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5230 {
5231 struct thread_info *t = inferior_thread ();
5232
5233 ui_out_text (uiout, "\n[");
5234 ui_out_field_string (uiout, "thread-name",
5235 target_pid_to_str (t->ptid));
5236 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5237 ui_out_text (uiout, " stopped");
5238 }
5239 else
5240 {
5241 ui_out_text (uiout, "\nProgram received signal ");
5242 annotate_signal_name ();
5243 if (ui_out_is_mi_like_p (uiout))
5244 ui_out_field_string
5245 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5246 ui_out_field_string (uiout, "signal-name",
5247 target_signal_to_name (stop_info));
5248 annotate_signal_name_end ();
5249 ui_out_text (uiout, ", ");
5250 annotate_signal_string ();
5251 ui_out_field_string (uiout, "signal-meaning",
5252 target_signal_to_string (stop_info));
5253 annotate_signal_string_end ();
5254 }
5255 ui_out_text (uiout, ".\n");
5256 break;
5257 case NO_HISTORY:
5258 /* Reverse execution: target ran out of history info. */
5259 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5260 break;
5261 default:
5262 internal_error (__FILE__, __LINE__,
5263 _("print_stop_reason: unrecognized enum value"));
5264 break;
5265 }
5266 }
5267 \f
5268
5269 /* Here to return control to GDB when the inferior stops for real.
5270 Print appropriate messages, remove breakpoints, give terminal our modes.
5271
5272 STOP_PRINT_FRAME nonzero means print the executing frame
5273 (pc, function, args, file, line number and line text).
5274 BREAKPOINTS_FAILED nonzero means stop was due to error
5275 attempting to insert breakpoints. */
5276
5277 void
5278 normal_stop (void)
5279 {
5280 struct target_waitstatus last;
5281 ptid_t last_ptid;
5282 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5283
5284 get_last_target_status (&last_ptid, &last);
5285
5286 /* If an exception is thrown from this point on, make sure to
5287 propagate GDB's knowledge of the executing state to the
5288 frontend/user running state. A QUIT is an easy exception to see
5289 here, so do this before any filtered output. */
5290 if (!non_stop)
5291 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5292 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5293 && last.kind != TARGET_WAITKIND_EXITED)
5294 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5295
5296 /* In non-stop mode, we don't want GDB to switch threads behind the
5297 user's back, to avoid races where the user is typing a command to
5298 apply to thread x, but GDB switches to thread y before the user
5299 finishes entering the command. */
5300
5301 /* As with the notification of thread events, we want to delay
5302 notifying the user that we've switched thread context until
5303 the inferior actually stops.
5304
5305 There's no point in saying anything if the inferior has exited.
5306 Note that SIGNALLED here means "exited with a signal", not
5307 "received a signal". */
5308 if (!non_stop
5309 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5310 && target_has_execution
5311 && last.kind != TARGET_WAITKIND_SIGNALLED
5312 && last.kind != TARGET_WAITKIND_EXITED)
5313 {
5314 target_terminal_ours_for_output ();
5315 printf_filtered (_("[Switching to %s]\n"),
5316 target_pid_to_str (inferior_ptid));
5317 annotate_thread_changed ();
5318 previous_inferior_ptid = inferior_ptid;
5319 }
5320
5321 if (!breakpoints_always_inserted_mode () && target_has_execution)
5322 {
5323 if (remove_breakpoints ())
5324 {
5325 target_terminal_ours_for_output ();
5326 printf_filtered (_("\
5327 Cannot remove breakpoints because program is no longer writable.\n\
5328 Further execution is probably impossible.\n"));
5329 }
5330 }
5331
5332 /* If an auto-display called a function and that got a signal,
5333 delete that auto-display to avoid an infinite recursion. */
5334
5335 if (stopped_by_random_signal)
5336 disable_current_display ();
5337
5338 /* Don't print a message if in the middle of doing a "step n"
5339 operation for n > 1 */
5340 if (target_has_execution
5341 && last.kind != TARGET_WAITKIND_SIGNALLED
5342 && last.kind != TARGET_WAITKIND_EXITED
5343 && inferior_thread ()->step_multi
5344 && inferior_thread ()->stop_step)
5345 goto done;
5346
5347 target_terminal_ours ();
5348
5349 /* Set the current source location. This will also happen if we
5350 display the frame below, but the current SAL will be incorrect
5351 during a user hook-stop function. */
5352 if (has_stack_frames () && !stop_stack_dummy)
5353 set_current_sal_from_frame (get_current_frame (), 1);
5354
5355 /* Let the user/frontend see the threads as stopped. */
5356 do_cleanups (old_chain);
5357
5358 /* Look up the hook_stop and run it (CLI internally handles problem
5359 of stop_command's pre-hook not existing). */
5360 if (stop_command)
5361 catch_errors (hook_stop_stub, stop_command,
5362 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5363
5364 if (!has_stack_frames ())
5365 goto done;
5366
5367 if (last.kind == TARGET_WAITKIND_SIGNALLED
5368 || last.kind == TARGET_WAITKIND_EXITED)
5369 goto done;
5370
5371 /* Select innermost stack frame - i.e., current frame is frame 0,
5372 and current location is based on that.
5373 Don't do this on return from a stack dummy routine,
5374 or if the program has exited. */
5375
5376 if (!stop_stack_dummy)
5377 {
5378 select_frame (get_current_frame ());
5379
5380 /* Print current location without a level number, if
5381 we have changed functions or hit a breakpoint.
5382 Print source line if we have one.
5383 bpstat_print() contains the logic deciding in detail
5384 what to print, based on the event(s) that just occurred. */
5385
5386 /* If --batch-silent is enabled then there's no need to print the current
5387 source location, and trying to do so risks causing an error message about
5388 missing source files. */
5389 if (stop_print_frame && !batch_silent)
5390 {
5391 int bpstat_ret;
5392 int source_flag;
5393 int do_frame_printing = 1;
5394 struct thread_info *tp = inferior_thread ();
5395
5396 bpstat_ret = bpstat_print (tp->stop_bpstat);
5397 switch (bpstat_ret)
5398 {
5399 case PRINT_UNKNOWN:
5400 /* If we had hit a shared library event breakpoint,
5401 bpstat_print would print out this message. If we hit
5402 an OS-level shared library event, do the same
5403 thing. */
5404 if (last.kind == TARGET_WAITKIND_LOADED)
5405 {
5406 printf_filtered (_("Stopped due to shared library event\n"));
5407 source_flag = SRC_LINE; /* something bogus */
5408 do_frame_printing = 0;
5409 break;
5410 }
5411
5412 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5413 (or should) carry around the function and does (or
5414 should) use that when doing a frame comparison. */
5415 if (tp->stop_step
5416 && frame_id_eq (tp->step_frame_id,
5417 get_frame_id (get_current_frame ()))
5418 && step_start_function == find_pc_function (stop_pc))
5419 source_flag = SRC_LINE; /* finished step, just print source line */
5420 else
5421 source_flag = SRC_AND_LOC; /* print location and source line */
5422 break;
5423 case PRINT_SRC_AND_LOC:
5424 source_flag = SRC_AND_LOC; /* print location and source line */
5425 break;
5426 case PRINT_SRC_ONLY:
5427 source_flag = SRC_LINE;
5428 break;
5429 case PRINT_NOTHING:
5430 source_flag = SRC_LINE; /* something bogus */
5431 do_frame_printing = 0;
5432 break;
5433 default:
5434 internal_error (__FILE__, __LINE__, _("Unknown value."));
5435 }
5436
5437 /* The behavior of this routine with respect to the source
5438 flag is:
5439 SRC_LINE: Print only source line
5440 LOCATION: Print only location
5441 SRC_AND_LOC: Print location and source line */
5442 if (do_frame_printing)
5443 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5444
5445 /* Display the auto-display expressions. */
5446 do_displays ();
5447 }
5448 }
5449
5450 /* Save the function value return registers, if we care.
5451 We might be about to restore their previous contents. */
5452 if (inferior_thread ()->proceed_to_finish)
5453 {
5454 /* This should not be necessary. */
5455 if (stop_registers)
5456 regcache_xfree (stop_registers);
5457
5458 /* NB: The copy goes through to the target picking up the value of
5459 all the registers. */
5460 stop_registers = regcache_dup (get_current_regcache ());
5461 }
5462
5463 if (stop_stack_dummy)
5464 {
5465 /* Pop the empty frame that contains the stack dummy.
5466 This also restores inferior state prior to the call
5467 (struct inferior_thread_state). */
5468 struct frame_info *frame = get_current_frame ();
5469 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5470 frame_pop (frame);
5471 /* frame_pop() calls reinit_frame_cache as the last thing it does
5472 which means there's currently no selected frame. We don't need
5473 to re-establish a selected frame if the dummy call returns normally,
5474 that will be done by restore_inferior_status. However, we do have
5475 to handle the case where the dummy call is returning after being
5476 stopped (e.g. the dummy call previously hit a breakpoint). We
5477 can't know which case we have so just always re-establish a
5478 selected frame here. */
5479 select_frame (get_current_frame ());
5480 }
5481
5482 done:
5483 annotate_stopped ();
5484
5485 /* Suppress the stop observer if we're in the middle of:
5486
5487 - a step n (n > 1), as there are still more steps to be done.
5488
5489 - a "finish" command, as the observer will be called in
5490 finish_command_continuation, so it can include the inferior
5491 function's return value.
5492
5493 - calling an inferior function, as we pretend the inferior didn't
5494 run at all. The return value of the call is handled by the
5495 expression evaluator, through call_function_by_hand. */
5496
5497 if (!target_has_execution
5498 || last.kind == TARGET_WAITKIND_SIGNALLED
5499 || last.kind == TARGET_WAITKIND_EXITED
5500 || (!inferior_thread ()->step_multi
5501 && !(inferior_thread ()->stop_bpstat
5502 && inferior_thread ()->proceed_to_finish)
5503 && !inferior_thread ()->in_infcall))
5504 {
5505 if (!ptid_equal (inferior_ptid, null_ptid))
5506 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5507 stop_print_frame);
5508 else
5509 observer_notify_normal_stop (NULL, stop_print_frame);
5510 }
5511
5512 if (target_has_execution)
5513 {
5514 if (last.kind != TARGET_WAITKIND_SIGNALLED
5515 && last.kind != TARGET_WAITKIND_EXITED)
5516 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5517 Delete any breakpoint that is to be deleted at the next stop. */
5518 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5519 }
5520
5521 /* Try to get rid of automatically added inferiors that are no
5522 longer needed. Keeping those around slows down things linearly.
5523 Note that this never removes the current inferior. */
5524 prune_inferiors ();
5525 }
5526
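/* Callback for catch_errors: run the commands the user attached to
   the "stop" hook.  CMD is the stop_command cmd_list_element.  */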
5527 static int
5528 hook_stop_stub (void *cmd)
5529 {
5530 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5531 return (0);
5532 }
5533 \f
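/* Accessors and updaters for the per-signal tables that control how
   infrun reacts to a signal in the inferior: signal_stop[] (stop and
   return control to the user), signal_print[] (announce the signal),
   and signal_program[] (pass the signal on to the program).  All are
   indexed by GDB's internal signal numbers and are maintained by the
   "handle" command below.  */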
5534 int
5535 signal_stop_state (int signo)
5536 {
5537 return signal_stop[signo];
5538 }
5539
5540 int
5541 signal_print_state (int signo)
5542 {
5543 return signal_print[signo];
5544 }
5545
5546 int
5547 signal_pass_state (int signo)
5548 {
5549 return signal_program[signo];
5550 }
5551
5552 int
5553 signal_stop_update (int signo, int state)
5554 {
5555 int ret = signal_stop[signo];
5556 signal_stop[signo] = state;
5557 return ret;
5558 }
5559
5560 int
5561 signal_print_update (int signo, int state)
5562 {
5563 int ret = signal_print[signo];
5564 signal_print[signo] = state;
5565 return ret;
5566 }
5567
5568 int
5569 signal_pass_update (int signo, int state)
5570 {
5571 int ret = signal_program[signo];
5572 signal_program[signo] = state;
5573 return ret;
5574 }
5575
5576 static void
5577 sig_print_header (void)
5578 {
5579 printf_filtered (_("\
5580 Signal Stop\tPrint\tPass to program\tDescription\n"));
5581 }
5582
5583 static void
5584 sig_print_info (enum target_signal oursig)
5585 {
5586 const char *name = target_signal_to_name (oursig);
5587 int name_padding = 13 - strlen (name);
5588
5589 if (name_padding <= 0)
5590 name_padding = 0;
5591
5592 printf_filtered ("%s", name);
5593 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5594 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5595 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5596 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5597 printf_filtered ("%s\n", target_signal_to_string (oursig));
5598 }
5599
5600 /* Specify how various signals in the inferior should be handled. */
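/* For example (syntax accepted by the parser below; actions apply
   cumulatively to every signal listed on the same line):

       handle SIGUSR1 nostop noprint pass
       handle SIGALRM SIGCHLD stop print
       handle 1-5 print nopass

   Numeric arguments and ranges refer to GDB's own signal numbering
   from target.h (see target_signal_from_command), not to host signal
   numbers.  */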
5601
5602 static void
5603 handle_command (char *args, int from_tty)
5604 {
5605 char **argv;
5606 int digits, wordlen;
5607 int sigfirst, signum, siglast;
5608 enum target_signal oursig;
5609 int allsigs;
5610 int nsigs;
5611 unsigned char *sigs;
5612 struct cleanup *old_chain;
5613
5614 if (args == NULL)
5615 {
5616 error_no_arg (_("signal to handle"));
5617 }
5618
5619 /* Allocate and zero an array of flags for which signals to handle. */
5620
5621 nsigs = (int) TARGET_SIGNAL_LAST;
5622 sigs = (unsigned char *) alloca (nsigs);
5623 memset (sigs, 0, nsigs);
5624
5625 /* Break the command line up into args. */
5626
5627 argv = gdb_buildargv (args);
5628 old_chain = make_cleanup_freeargv (argv);
5629
5630 /* Walk through the args, looking for signal oursigs, signal names, and
5631 actions. Signal numbers and signal names may be interspersed with
5632 actions, with the actions being performed for all signals cumulatively
5633 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5634
5635 while (*argv != NULL)
5636 {
5637 wordlen = strlen (*argv);
5638 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5639 {;
5640 }
5641 allsigs = 0;
5642 sigfirst = siglast = -1;
5643
5644 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5645 {
5646 /* Apply action to all signals except those used by the
5647 debugger. Silently skip those. */
5648 allsigs = 1;
5649 sigfirst = 0;
5650 siglast = nsigs - 1;
5651 }
5652 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5653 {
5654 SET_SIGS (nsigs, sigs, signal_stop);
5655 SET_SIGS (nsigs, sigs, signal_print);
5656 }
5657 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5658 {
5659 UNSET_SIGS (nsigs, sigs, signal_program);
5660 }
5661 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5662 {
5663 SET_SIGS (nsigs, sigs, signal_print);
5664 }
5665 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5666 {
5667 SET_SIGS (nsigs, sigs, signal_program);
5668 }
5669 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5670 {
5671 UNSET_SIGS (nsigs, sigs, signal_stop);
5672 }
5673 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5674 {
5675 SET_SIGS (nsigs, sigs, signal_program);
5676 }
5677 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5678 {
5679 UNSET_SIGS (nsigs, sigs, signal_print);
5680 UNSET_SIGS (nsigs, sigs, signal_stop);
5681 }
5682 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5683 {
5684 UNSET_SIGS (nsigs, sigs, signal_program);
5685 }
5686 else if (digits > 0)
5687 {
5688 /* It is numeric. The numeric signal refers to our own
5689 internal signal numbering from target.h, not to host/target
5690 signal number. This is a feature; users really should be
5691 using symbolic names anyway, and the common ones like
5692 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5693
5694 sigfirst = siglast = (int)
5695 target_signal_from_command (atoi (*argv));
5696 if ((*argv)[digits] == '-')
5697 {
5698 siglast = (int)
5699 target_signal_from_command (atoi ((*argv) + digits + 1));
5700 }
5701 if (sigfirst > siglast)
5702 {
5703 /* Bet he didn't figure we'd think of this case... */
5704 signum = sigfirst;
5705 sigfirst = siglast;
5706 siglast = signum;
5707 }
5708 }
5709 else
5710 {
5711 oursig = target_signal_from_name (*argv);
5712 if (oursig != TARGET_SIGNAL_UNKNOWN)
5713 {
5714 sigfirst = siglast = (int) oursig;
5715 }
5716 else
5717 {
5718 /* Not a number and not a recognized flag word => complain. */
5719 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5720 }
5721 }
5722
5723 /* If any signal numbers or symbol names were found, set flags for
5724 which signals to apply actions to. */
5725
5726 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5727 {
5728 switch ((enum target_signal) signum)
5729 {
5730 case TARGET_SIGNAL_TRAP:
5731 case TARGET_SIGNAL_INT:
5732 if (!allsigs && !sigs[signum])
5733 {
5734 if (query (_("%s is used by the debugger.\n\
5735 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5736 {
5737 sigs[signum] = 1;
5738 }
5739 else
5740 {
5741 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5742 gdb_flush (gdb_stdout);
5743 }
5744 }
5745 break;
5746 case TARGET_SIGNAL_0:
5747 case TARGET_SIGNAL_DEFAULT:
5748 case TARGET_SIGNAL_UNKNOWN:
5749 /* Make sure that "all" doesn't print these. */
5750 break;
5751 default:
5752 sigs[signum] = 1;
5753 break;
5754 }
5755 }
5756
5757 argv++;
5758 }
5759
5760 for (signum = 0; signum < nsigs; signum++)
5761 if (sigs[signum])
5762 {
5763 target_notice_signals (inferior_ptid);
5764
5765 if (from_tty)
5766 {
5767 /* Show the results. */
5768 sig_print_header ();
5769 for (; signum < nsigs; signum++)
5770 if (sigs[signum])
5771 sig_print_info (signum);
5772 }
5773
5774 break;
5775 }
5776
5777 do_cleanups (old_chain);
5778 }
5779
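/* The XDB-compatible "z" command: translate an XDB signal-handling
   flag ("s" toggles stop/nostop, "i" toggles pass/nopass, "r" toggles
   print/noprint, "Q" means noprint) into the equivalent "handle"
   arguments and dispatch them to handle_command.  */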
5780 static void
5781 xdb_handle_command (char *args, int from_tty)
5782 {
5783 char **argv;
5784 struct cleanup *old_chain;
5785
5786 if (args == NULL)
5787 error_no_arg (_("xdb command"));
5788
5789 /* Break the command line up into args. */
5790
5791 argv = gdb_buildargv (args);
5792 old_chain = make_cleanup_freeargv (argv);
5793 if (argv[1] != (char *) NULL)
5794 {
5795 char *argBuf;
5796 int bufLen;
5797
5798 bufLen = strlen (argv[0]) + 20;
5799 argBuf = (char *) xmalloc (bufLen);
5800 if (argBuf)
5801 {
5802 int validFlag = 1;
5803 enum target_signal oursig;
5804
5805 oursig = target_signal_from_name (argv[0]);
5806 memset (argBuf, 0, bufLen);
5807 if (strcmp (argv[1], "Q") == 0)
5808 sprintf (argBuf, "%s %s", argv[0], "noprint");
5809 else
5810 {
5811 if (strcmp (argv[1], "s") == 0)
5812 {
5813 if (!signal_stop[oursig])
5814 sprintf (argBuf, "%s %s", argv[0], "stop");
5815 else
5816 sprintf (argBuf, "%s %s", argv[0], "nostop");
5817 }
5818 else if (strcmp (argv[1], "i") == 0)
5819 {
5820 if (!signal_program[oursig])
5821 sprintf (argBuf, "%s %s", argv[0], "pass");
5822 else
5823 sprintf (argBuf, "%s %s", argv[0], "nopass");
5824 }
5825 else if (strcmp (argv[1], "r") == 0)
5826 {
5827 if (!signal_print[oursig])
5828 sprintf (argBuf, "%s %s", argv[0], "print");
5829 else
5830 sprintf (argBuf, "%s %s", argv[0], "noprint");
5831 }
5832 else
5833 validFlag = 0;
5834 }
5835 if (validFlag)
5836 handle_command (argBuf, from_tty);
5837 else
5838 printf_filtered (_("Invalid signal handling flag.\n"));
5839 if (argBuf)
5840 xfree (argBuf);
5841 }
5842 }
5843 do_cleanups (old_chain);
5844 }
5845
5846 /* Print current contents of the tables set by the handle command.
5847 It is possible we should just be printing signals actually used
5848 by the current target (but for things to work right when switching
5849 targets, all signals should be in the signal tables). */
5850
5851 static void
5852 signals_info (char *signum_exp, int from_tty)
5853 {
5854 enum target_signal oursig;
5855 sig_print_header ();
5856
5857 if (signum_exp)
5858 {
5859 /* First see if this is a symbol name. */
5860 oursig = target_signal_from_name (signum_exp);
5861 if (oursig == TARGET_SIGNAL_UNKNOWN)
5862 {
5863 /* No, try numeric. */
5864 oursig =
5865 target_signal_from_command (parse_and_eval_long (signum_exp));
5866 }
5867 sig_print_info (oursig);
5868 return;
5869 }
5870
5871 printf_filtered ("\n");
5872 /* These ugly casts brought to you by the native VAX compiler. */
5873 for (oursig = TARGET_SIGNAL_FIRST;
5874 (int) oursig < (int) TARGET_SIGNAL_LAST;
5875 oursig = (enum target_signal) ((int) oursig + 1))
5876 {
5877 QUIT;
5878
5879 if (oursig != TARGET_SIGNAL_UNKNOWN
5880 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5881 sig_print_info (oursig);
5882 }
5883
5884 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5885 }
5886
5887 /* The $_siginfo convenience variable is a bit special. We don't know
5888 for sure the type of the value until we actually have a chance to
5889 fetch the data.  The type can change depending on gdbarch, so it is
5890 also dependent on which thread you have selected.  We handle this by:
5891
5892 1. making $_siginfo be an internalvar that creates a new value on
5893 access.
5894
5895 2. making the value of $_siginfo be an lval_computed value. */
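/* For instance, on a target that provides TARGET_OBJECT_SIGNAL_INFO
   and whose gdbarch defines a siginfo type (GNU/Linux, for example),
   the user can inspect or change the extra information carried by the
   signal the inferior last received:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   Reads and writes of such expressions are routed through the
   lval_computed callbacks below.  */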
5896
5897 /* This function implements the lval_computed support for reading a
5898 $_siginfo value. */
5899
5900 static void
5901 siginfo_value_read (struct value *v)
5902 {
5903 LONGEST transferred;
5904
5905 transferred =
5906 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5907 NULL,
5908 value_contents_all_raw (v),
5909 value_offset (v),
5910 TYPE_LENGTH (value_type (v)));
5911
5912 if (transferred != TYPE_LENGTH (value_type (v)))
5913 error (_("Unable to read siginfo"));
5914 }
5915
5916 /* This function implements the lval_computed support for writing a
5917 $_siginfo value. */
5918
5919 static void
5920 siginfo_value_write (struct value *v, struct value *fromval)
5921 {
5922 LONGEST transferred;
5923
5924 transferred = target_write (&current_target,
5925 TARGET_OBJECT_SIGNAL_INFO,
5926 NULL,
5927 value_contents_all_raw (fromval),
5928 value_offset (v),
5929 TYPE_LENGTH (value_type (fromval)));
5930
5931 if (transferred != TYPE_LENGTH (value_type (fromval)))
5932 error (_("Unable to write siginfo"));
5933 }
5934
5935 static struct lval_funcs siginfo_value_funcs =
5936 {
5937 siginfo_value_read,
5938 siginfo_value_write
5939 };
5940
5941 /* Return a new value with the correct type for the siginfo object of
5942 the current thread using architecture GDBARCH. Return a void value
5943 if there's no object available. */
5944
5945 static struct value *
5946 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5947 {
5948 if (target_has_stack
5949 && !ptid_equal (inferior_ptid, null_ptid)
5950 && gdbarch_get_siginfo_type_p (gdbarch))
5951 {
5952 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5953 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5954 }
5955
5956 return allocate_value (builtin_type (gdbarch)->builtin_void);
5957 }
5958
5959 \f
5960 /* Inferior thread state.
5961 These are details related to the inferior itself, and don't include
5962 things like what frame the user had selected or what gdb was doing
5963 with the target at the time.
5964 For inferior function calls these are things we want to restore
5965 regardless of whether the function call successfully completes
5966 or the dummy frame has to be manually popped. */
5967
5968 struct inferior_thread_state
5969 {
5970 enum target_signal stop_signal;
5971 CORE_ADDR stop_pc;
5972 struct regcache *registers;
5973 };
5974
5975 struct inferior_thread_state *
5976 save_inferior_thread_state (void)
5977 {
5978 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5979 struct thread_info *tp = inferior_thread ();
5980
5981 inf_state->stop_signal = tp->stop_signal;
5982 inf_state->stop_pc = stop_pc;
5983
5984 inf_state->registers = regcache_dup (get_current_regcache ());
5985
5986 return inf_state;
5987 }
5988
5989 /* Restore inferior session state to INF_STATE. */
5990
5991 void
5992 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5993 {
5994 struct thread_info *tp = inferior_thread ();
5995
5996 tp->stop_signal = inf_state->stop_signal;
5997 stop_pc = inf_state->stop_pc;
5998
5999 /* The inferior can be gone if the user types "print exit(0)"
6000 (and perhaps other times). */
6001 if (target_has_execution)
6002 /* NB: The register write goes through to the target. */
6003 regcache_cpy (get_current_regcache (), inf_state->registers);
6004 regcache_xfree (inf_state->registers);
6005 xfree (inf_state);
6006 }
6007
6008 static void
6009 do_restore_inferior_thread_state_cleanup (void *state)
6010 {
6011 restore_inferior_thread_state (state);
6012 }
6013
6014 struct cleanup *
6015 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6016 {
6017 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6018 }
6019
6020 void
6021 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6022 {
6023 regcache_xfree (inf_state->registers);
6024 xfree (inf_state);
6025 }
6026
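/* Return the regcache saved in INF_STATE.  The regcache remains owned
   by INF_STATE; it is freed when the state is restored or
   discarded.  */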
6027 struct regcache *
6028 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6029 {
6030 return inf_state->registers;
6031 }
6032
6033 /* Session related state for inferior function calls.
6034 These are the additional bits of state that need to be restored
6035 when an inferior function call successfully completes. */
6036
6037 struct inferior_status
6038 {
6039 bpstat stop_bpstat;
6040 int stop_step;
6041 int stop_stack_dummy;
6042 int stopped_by_random_signal;
6043 int stepping_over_breakpoint;
6044 CORE_ADDR step_range_start;
6045 CORE_ADDR step_range_end;
6046 struct frame_id step_frame_id;
6047 struct frame_id step_stack_frame_id;
6048 enum step_over_calls_kind step_over_calls;
6049 CORE_ADDR step_resume_break_address;
6050 int stop_after_trap;
6051 int stop_soon;
6052
6053 /* ID of the selected frame when the inferior function call was made. */
6054 struct frame_id selected_frame_id;
6055
6056 int proceed_to_finish;
6057 int in_infcall;
6058 };
6059
6060 /* Save all of the information associated with the inferior<==>gdb
6061 connection. */
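/* A sketch of the intended pairing (the surrounding caller code is
   hypothetical and shown only for illustration):

     struct inferior_status *inf_status = save_inferior_status ();
     struct cleanup *chain
       = make_cleanup_restore_inferior_status (inf_status);
     ... run the inferior function call ...
     do_cleanups (chain);       restores the saved state and frees it
   or, to keep the current state instead:
     discard_cleanups (chain);
     discard_inferior_status (inf_status);  */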
6062
6063 struct inferior_status *
6064 save_inferior_status (void)
6065 {
6066 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6067 struct thread_info *tp = inferior_thread ();
6068 struct inferior *inf = current_inferior ();
6069
6070 inf_status->stop_step = tp->stop_step;
6071 inf_status->stop_stack_dummy = stop_stack_dummy;
6072 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6073 inf_status->stepping_over_breakpoint = tp->trap_expected;
6074 inf_status->step_range_start = tp->step_range_start;
6075 inf_status->step_range_end = tp->step_range_end;
6076 inf_status->step_frame_id = tp->step_frame_id;
6077 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6078 inf_status->step_over_calls = tp->step_over_calls;
6079 inf_status->stop_after_trap = stop_after_trap;
6080 inf_status->stop_soon = inf->stop_soon;
6081 /* Save original bpstat chain here; replace it with copy of chain.
6082 If caller's caller is walking the chain, they'll be happier if we
6083 hand them back the original chain when restore_inferior_status is
6084 called. */
6085 inf_status->stop_bpstat = tp->stop_bpstat;
6086 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6087 inf_status->proceed_to_finish = tp->proceed_to_finish;
6088 inf_status->in_infcall = tp->in_infcall;
6089
6090 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6091
6092 return inf_status;
6093 }
6094
6095 static int
6096 restore_selected_frame (void *args)
6097 {
6098 struct frame_id *fid = (struct frame_id *) args;
6099 struct frame_info *frame;
6100
6101 frame = frame_find_by_id (*fid);
6102
6103 /* If the previously selected frame can no longer be found, warn and
6104 let the caller (restore_inferior_status) fall back to the current frame. */
6105 if (frame == NULL)
6106 {
6107 warning (_("Unable to restore previously selected frame."));
6108 return 0;
6109 }
6110
6111 select_frame (frame);
6112
6113 return (1);
6114 }
6115
6116 /* Restore inferior session state to INF_STATUS. */
6117
6118 void
6119 restore_inferior_status (struct inferior_status *inf_status)
6120 {
6121 struct thread_info *tp = inferior_thread ();
6122 struct inferior *inf = current_inferior ();
6123
6124 tp->stop_step = inf_status->stop_step;
6125 stop_stack_dummy = inf_status->stop_stack_dummy;
6126 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6127 tp->trap_expected = inf_status->stepping_over_breakpoint;
6128 tp->step_range_start = inf_status->step_range_start;
6129 tp->step_range_end = inf_status->step_range_end;
6130 tp->step_frame_id = inf_status->step_frame_id;
6131 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6132 tp->step_over_calls = inf_status->step_over_calls;
6133 stop_after_trap = inf_status->stop_after_trap;
6134 inf->stop_soon = inf_status->stop_soon;
6135 bpstat_clear (&tp->stop_bpstat);
6136 tp->stop_bpstat = inf_status->stop_bpstat;
6137 inf_status->stop_bpstat = NULL;
6138 tp->proceed_to_finish = inf_status->proceed_to_finish;
6139 tp->in_infcall = inf_status->in_infcall;
6140
6141 if (target_has_stack)
6142 {
6143 /* The point of catch_errors is that if the stack is clobbered,
6144 walking the stack might encounter a garbage pointer and
6145 error() trying to dereference it. */
6146 if (catch_errors
6147 (restore_selected_frame, &inf_status->selected_frame_id,
6148 "Unable to restore previously selected frame:\n",
6149 RETURN_MASK_ERROR) == 0)
6150 /* Error in restoring the selected frame. Select the innermost
6151 frame. */
6152 select_frame (get_current_frame ());
6153 }
6154
6155 xfree (inf_status);
6156 }
6157
6158 static void
6159 do_restore_inferior_status_cleanup (void *sts)
6160 {
6161 restore_inferior_status (sts);
6162 }
6163
6164 struct cleanup *
6165 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6166 {
6167 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6168 }
6169
6170 void
6171 discard_inferior_status (struct inferior_status *inf_status)
6172 {
6173 /* See save_inferior_status for info on stop_bpstat. */
6174 bpstat_clear (&inf_status->stop_bpstat);
6175 xfree (inf_status);
6176 }
6177 \f
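/* The inferior_has_* predicates below check whether the last reported
   target event for PID was of the corresponding kind (fork, vfork,
   exec, or syscall entry/return) and, if so, return the associated
   datum (child ptid, exec'd pathname, or syscall number) through the
   out parameter.  */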
6178 int
6179 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6180 {
6181 struct target_waitstatus last;
6182 ptid_t last_ptid;
6183
6184 get_last_target_status (&last_ptid, &last);
6185
6186 if (last.kind != TARGET_WAITKIND_FORKED)
6187 return 0;
6188
6189 if (!ptid_equal (last_ptid, pid))
6190 return 0;
6191
6192 *child_pid = last.value.related_pid;
6193 return 1;
6194 }
6195
6196 int
6197 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6198 {
6199 struct target_waitstatus last;
6200 ptid_t last_ptid;
6201
6202 get_last_target_status (&last_ptid, &last);
6203
6204 if (last.kind != TARGET_WAITKIND_VFORKED)
6205 return 0;
6206
6207 if (!ptid_equal (last_ptid, pid))
6208 return 0;
6209
6210 *child_pid = last.value.related_pid;
6211 return 1;
6212 }
6213
6214 int
6215 inferior_has_execd (ptid_t pid, char **execd_pathname)
6216 {
6217 struct target_waitstatus last;
6218 ptid_t last_ptid;
6219
6220 get_last_target_status (&last_ptid, &last);
6221
6222 if (last.kind != TARGET_WAITKIND_EXECD)
6223 return 0;
6224
6225 if (!ptid_equal (last_ptid, pid))
6226 return 0;
6227
6228 *execd_pathname = xstrdup (last.value.execd_pathname);
6229 return 1;
6230 }
6231
6232 int
6233 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6234 {
6235 struct target_waitstatus last;
6236 ptid_t last_ptid;
6237
6238 get_last_target_status (&last_ptid, &last);
6239
6240 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6241 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6242 return 0;
6243
6244 if (!ptid_equal (last_ptid, pid))
6245 return 0;
6246
6247 *syscall_number = last.value.syscall_number;
6248 return 1;
6249 }
6250
6251 /* Oft used ptids */
6252 ptid_t null_ptid;
6253 ptid_t minus_one_ptid;
6254
6255 /* Create a ptid given the necessary PID, LWP, and TID components. */
6256
6257 ptid_t
6258 ptid_build (int pid, long lwp, long tid)
6259 {
6260 ptid_t ptid;
6261
6262 ptid.pid = pid;
6263 ptid.lwp = lwp;
6264 ptid.tid = tid;
6265 return ptid;
6266 }
6267
6268 /* Create a ptid from just a pid. */
6269
6270 ptid_t
6271 pid_to_ptid (int pid)
6272 {
6273 return ptid_build (pid, 0, 0);
6274 }
6275
6276 /* Fetch the pid (process id) component from a ptid. */
6277
6278 int
6279 ptid_get_pid (ptid_t ptid)
6280 {
6281 return ptid.pid;
6282 }
6283
6284 /* Fetch the lwp (lightweight process) component from a ptid. */
6285
6286 long
6287 ptid_get_lwp (ptid_t ptid)
6288 {
6289 return ptid.lwp;
6290 }
6291
6292 /* Fetch the tid (thread id) component from a ptid. */
6293
6294 long
6295 ptid_get_tid (ptid_t ptid)
6296 {
6297 return ptid.tid;
6298 }
6299
6300 /* ptid_equal() is used to test equality of two ptids. */
6301
6302 int
6303 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6304 {
6305 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6306 && ptid1.tid == ptid2.tid);
6307 }
6308
6309 /* Returns true if PTID represents a process. */
6310
6311 int
6312 ptid_is_pid (ptid_t ptid)
6313 {
6314 if (ptid_equal (minus_one_ptid, ptid))
6315 return 0;
6316 if (ptid_equal (null_ptid, ptid))
6317 return 0;
6318
6319 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6320 }
6321
6322 int
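/* Return non-zero if PTID matches FILTER.  FILTER can be the wildcard
   MINUS_ONE_PTID (every ptid matches it), a process ptid as returned
   by pid_to_ptid (every thread of that process matches), or a
   specific thread ptid (only that exact ptid matches).  For example:

     ptid_match (ptid_build (10, 1, 0), minus_one_ptid)          => 1
     ptid_match (ptid_build (10, 1, 0), pid_to_ptid (10))        => 1
     ptid_match (ptid_build (10, 1, 0), ptid_build (10, 2, 0))   => 0  */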
6323 ptid_match (ptid_t ptid, ptid_t filter)
6324 {
6325 /* Since both parameters have the same type, prevent easy mistakes
6326 from happening. */
6327 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6328 && !ptid_equal (ptid, null_ptid)
6329 && !ptid_is_pid (ptid));
6330
6331 if (ptid_equal (filter, minus_one_ptid))
6332 return 1;
6333 if (ptid_is_pid (filter)
6334 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6335 return 1;
6336 else if (ptid_equal (ptid, filter))
6337 return 1;
6338
6339 return 0;
6340 }
6341
6342 /* restore_inferior_ptid() will be used by the cleanup machinery
6343 to restore the inferior_ptid value saved in a call to
6344 save_inferior_ptid(). */
6345
6346 static void
6347 restore_inferior_ptid (void *arg)
6348 {
6349 ptid_t *saved_ptid_ptr = arg;
6350 inferior_ptid = *saved_ptid_ptr;
6351 xfree (arg);
6352 }
6353
6354 /* Save the value of inferior_ptid so that it may be restored by a
6355 later call to do_cleanups(). Returns the struct cleanup pointer
6356 needed for later doing the cleanup. */
6357
6358 struct cleanup *
6359 save_inferior_ptid (void)
6360 {
6361 ptid_t *saved_ptid_ptr;
6362
6363 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6364 *saved_ptid_ptr = inferior_ptid;
6365 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6366 }
6367 \f
6368
6369 /* User interface for reverse debugging:
6370 Set exec-direction / show exec-direction commands
6371 (returns error unless target implements to_set_exec_direction method). */
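/* For example, with a target that can execute in reverse (such as one
   running under process record, enabled with the "record" command):

       (gdb) set exec-direction reverse
       (gdb) step

   When the target cannot execute in reverse, the setter below leaves
   execution_direction unchanged (EXEC_FORWARD by default).  */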
6372
6373 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6374 static const char exec_forward[] = "forward";
6375 static const char exec_reverse[] = "reverse";
6376 static const char *exec_direction = exec_forward;
6377 static const char *exec_direction_names[] = {
6378 exec_forward,
6379 exec_reverse,
6380 NULL
6381 };
6382
6383 static void
6384 set_exec_direction_func (char *args, int from_tty,
6385 struct cmd_list_element *cmd)
6386 {
6387 if (target_can_execute_reverse)
6388 {
6389 if (!strcmp (exec_direction, exec_forward))
6390 execution_direction = EXEC_FORWARD;
6391 else if (!strcmp (exec_direction, exec_reverse))
6392 execution_direction = EXEC_REVERSE;
6393 }
6394 }
6395
6396 static void
6397 show_exec_direction_func (struct ui_file *out, int from_tty,
6398 struct cmd_list_element *cmd, const char *value)
6399 {
6400 switch (execution_direction) {
6401 case EXEC_FORWARD:
6402 fprintf_filtered (out, _("Forward.\n"));
6403 break;
6404 case EXEC_REVERSE:
6405 fprintf_filtered (out, _("Reverse.\n"));
6406 break;
6407 case EXEC_ERROR:
6408 default:
6409 fprintf_filtered (out,
6410 _("Forward (target `%s' does not support exec-direction).\n"),
6411 target_shortname);
6412 break;
6413 }
6414 }
6415
6416 /* User interface for non-stop mode. */
6417
6418 int non_stop = 0;
6419 static int non_stop_1 = 0;
6420
6421 static void
6422 set_non_stop (char *args, int from_tty,
6423 struct cmd_list_element *c)
6424 {
6425 if (target_has_execution)
6426 {
6427 non_stop_1 = non_stop;
6428 error (_("Cannot change this setting while the inferior is running."));
6429 }
6430
6431 non_stop = non_stop_1;
6432 }
6433
6434 static void
6435 show_non_stop (struct ui_file *file, int from_tty,
6436 struct cmd_list_element *c, const char *value)
6437 {
6438 fprintf_filtered (file,
6439 _("Controlling the inferior in non-stop mode is %s.\n"),
6440 value);
6441 }
6442
6443 static void
6444 show_schedule_multiple (struct ui_file *file, int from_tty,
6445 struct cmd_list_element *c, const char *value)
6446 {
6447 fprintf_filtered (file, _("\
6448 Resuming the execution of threads of all processes is %s.\n"), value);
6449 }
6450
6451 void
6452 _initialize_infrun (void)
6453 {
6454 int i;
6455 int numsigs;
6456 struct cmd_list_element *c;
6457
6458 add_info ("signals", signals_info, _("\
6459 What debugger does when program gets various signals.\n\
6460 Specify a signal as argument to print info on that signal only."));
6461 add_info_alias ("handle", "signals", 0);
6462
6463 add_com ("handle", class_run, handle_command, _("\
6464 Specify how to handle a signal.\n\
6465 Args are signals and actions to apply to those signals.\n\
6466 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6467 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6468 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6469 The special arg \"all\" is recognized to mean all signals except those\n\
6470 used by the debugger, typically SIGTRAP and SIGINT.\n\
6471 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6472 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6473 Stop means reenter debugger if this signal happens (implies print).\n\
6474 Print means print a message if this signal happens.\n\
6475 Pass means let program see this signal; otherwise program doesn't know.\n\
6476 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6477 Pass and Stop may be combined."));
6478 if (xdb_commands)
6479 {
6480 add_com ("lz", class_info, signals_info, _("\
6481 What debugger does when program gets various signals.\n\
6482 Specify a signal as argument to print info on that signal only."));
6483 add_com ("z", class_run, xdb_handle_command, _("\
6484 Specify how to handle a signal.\n\
6485 Args are signals and actions to apply to those signals.\n\
6486 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6487 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6488 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6489 The special arg \"all\" is recognized to mean all signals except those\n\
6490 used by the debugger, typically SIGTRAP and SIGINT.\n\
6491 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6492 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6493 nopass), \"Q\" (noprint)\n\
6494 Stop means reenter debugger if this signal happens (implies print).\n\
6495 Print means print a message if this signal happens.\n\
6496 Pass means let program see this signal; otherwise program doesn't know.\n\
6497 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6498 Pass and Stop may be combined."));
6499 }
6500
6501 if (!dbx_commands)
6502 stop_command = add_cmd ("stop", class_obscure,
6503 not_just_help_class_command, _("\
6504 There is no `stop' command, but you can set a hook on `stop'.\n\
6505 This allows you to set a list of commands to be run each time execution\n\
6506 of the program stops."), &cmdlist);
6507
6508 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6509 Set inferior debugging."), _("\
6510 Show inferior debugging."), _("\
6511 When non-zero, inferior specific debugging is enabled."),
6512 NULL,
6513 show_debug_infrun,
6514 &setdebuglist, &showdebuglist);
6515
6516 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6517 Set displaced stepping debugging."), _("\
6518 Show displaced stepping debugging."), _("\
6519 When non-zero, displaced stepping specific debugging is enabled."),
6520 NULL,
6521 show_debug_displaced,
6522 &setdebuglist, &showdebuglist);
6523
6524 add_setshow_boolean_cmd ("non-stop", no_class,
6525 &non_stop_1, _("\
6526 Set whether gdb controls the inferior in non-stop mode."), _("\
6527 Show whether gdb controls the inferior in non-stop mode."), _("\
6528 When debugging a multi-threaded program and this setting is\n\
6529 off (the default, also called all-stop mode), when one thread stops\n\
6530 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6531 all other threads in the program while you interact with the thread of\n\
6532 interest. When you continue or step a thread, you can allow the other\n\
6533 threads to run, or have them remain stopped, but while you inspect any\n\
6534 thread's state, all threads stop.\n\
6535 \n\
6536 In non-stop mode, when one thread stops, other threads can continue\n\
6537 to run freely. You'll be able to step each thread independently,\n\
6538 leave it stopped or free to run as needed."),
6539 set_non_stop,
6540 show_non_stop,
6541 &setlist,
6542 &showlist);
6543
6544 numsigs = (int) TARGET_SIGNAL_LAST;
6545 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6546 signal_print = (unsigned char *)
6547 xmalloc (sizeof (signal_print[0]) * numsigs);
6548 signal_program = (unsigned char *)
6549 xmalloc (sizeof (signal_program[0]) * numsigs);
6550 for (i = 0; i < numsigs; i++)
6551 {
6552 signal_stop[i] = 1;
6553 signal_print[i] = 1;
6554 signal_program[i] = 1;
6555 }
6556
6557 /* Signals caused by debugger's own actions
6558 should not be given to the program afterwards. */
6559 signal_program[TARGET_SIGNAL_TRAP] = 0;
6560 signal_program[TARGET_SIGNAL_INT] = 0;
6561
6562 /* Signals that are not errors should not normally enter the debugger. */
6563 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6564 signal_print[TARGET_SIGNAL_ALRM] = 0;
6565 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6566 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6567 signal_stop[TARGET_SIGNAL_PROF] = 0;
6568 signal_print[TARGET_SIGNAL_PROF] = 0;
6569 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6570 signal_print[TARGET_SIGNAL_CHLD] = 0;
6571 signal_stop[TARGET_SIGNAL_IO] = 0;
6572 signal_print[TARGET_SIGNAL_IO] = 0;
6573 signal_stop[TARGET_SIGNAL_POLL] = 0;
6574 signal_print[TARGET_SIGNAL_POLL] = 0;
6575 signal_stop[TARGET_SIGNAL_URG] = 0;
6576 signal_print[TARGET_SIGNAL_URG] = 0;
6577 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6578 signal_print[TARGET_SIGNAL_WINCH] = 0;
6579
6580 /* These signals are used internally by user-level thread
6581 implementations. (See signal(5) on Solaris.) Like the above
6582 signals, a healthy program receives and handles them as part of
6583 its normal operation. */
6584 signal_stop[TARGET_SIGNAL_LWP] = 0;
6585 signal_print[TARGET_SIGNAL_LWP] = 0;
6586 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6587 signal_print[TARGET_SIGNAL_WAITING] = 0;
6588 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6589 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6590
6591 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6592 &stop_on_solib_events, _("\
6593 Set stopping for shared library events."), _("\
6594 Show stopping for shared library events."), _("\
6595 If nonzero, gdb will give control to the user when the dynamic linker\n\
6596 notifies gdb of shared library events. The most common event of interest\n\
6597 to the user would be loading/unloading of a new library."),
6598 NULL,
6599 show_stop_on_solib_events,
6600 &setlist, &showlist);
6601
6602 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6603 follow_fork_mode_kind_names,
6604 &follow_fork_mode_string, _("\
6605 Set debugger response to a program call of fork or vfork."), _("\
6606 Show debugger response to a program call of fork or vfork."), _("\
6607 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6608 parent - the original process is debugged after a fork\n\
6609 child - the new process is debugged after a fork\n\
6610 The unfollowed process will continue to run.\n\
6611 By default, the debugger will follow the parent process."),
6612 NULL,
6613 show_follow_fork_mode_string,
6614 &setlist, &showlist);
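/* For example, "set follow-fork-mode child" makes gdb stay with the new
   child at the next fork; combined with "set detach-on-fork off"
   (registered below), both processes remain under gdb's control.  */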
6615
6616 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6617 follow_exec_mode_names,
6618 &follow_exec_mode_string, _("\
6619 Set debugger response to a program call of exec."), _("\
6620 Show debugger response to a program call of exec."), _("\
6621 An exec call replaces the program image of a process.\n\
6622 \n\
6623 follow-exec-mode can be:\n\
6624 \n\
6625 new - the debugger creates a new inferior and rebinds the process\n\
6626 to this new inferior. The program the process was running before\n\
6627 the exec call can be restarted afterwards by restarting the original\n\
6628 inferior.\n\
6629 \n\
6630 same - the debugger keeps the process bound to the same inferior.\n\
6631 The new executable image replaces the previous executable loaded in\n\
6632 the inferior.  Restarting the inferior after the exec call restarts\n\
6633 the new executable image, i.e. the program the process exec'd into.\n\
6634 \n\
6635 By default, the debugger will use the same inferior."),
6636 NULL,
6637 show_follow_exec_mode_string,
6638 &setlist, &showlist);
6639
6640 add_setshow_enum_cmd ("scheduler-locking", class_run,
6641 scheduler_enums, &scheduler_mode, _("\
6642 Set mode for locking scheduler during execution."), _("\
6643 Show mode for locking scheduler during execution."), _("\
6644 off == no locking (threads may preempt at any time)\n\
6645 on == full locking (no thread except the current thread may run)\n\
6646 step == scheduler locked during every single-step operation.\n\
6647 In this mode, no other thread may run during a step command.\n\
6648 Other threads may run while stepping over a function call ('next')."),
6649 set_schedlock_func, /* traps on target vector */
6650 show_scheduler_mode,
6651 &setlist, &showlist);
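/* A typical use is "set scheduler-locking step", which keeps other
   threads from running (and changing program state) while the user
   single-steps the thread of interest.  */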
6652
6653 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6654 Set mode for resuming threads of all processes."), _("\
6655 Show mode for resuming threads of all processes."), _("\
6656 When on, execution commands (such as 'continue' or 'next') resume all\n\
6657 threads of all processes. When off (which is the default), execution\n\
6658 commands only resume the threads of the current process. The set of\n\
6659 threads that are resumed is further refined by the scheduler-locking\n\
6660 mode (see help set scheduler-locking)."),
6661 NULL,
6662 show_schedule_multiple,
6663 &setlist, &showlist);
6664
6665 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6666 Set mode of the step operation."), _("\
6667 Show mode of the step operation."), _("\
6668 When set, stepping into a function that has no debug line information\n\
6669 stops at the first instruction of that function.  Otherwise, the\n\
6670 function is skipped and the step command stops at a different source line."),
6671 NULL,
6672 show_step_stop_if_no_debug,
6673 &setlist, &showlist);
6674
6675 add_setshow_enum_cmd ("displaced-stepping", class_run,
6676 can_use_displaced_stepping_enum,
6677 &can_use_displaced_stepping, _("\
6678 Set debugger's willingness to use displaced stepping."), _("\
6679 Show debugger's willingness to use displaced stepping."), _("\
6680 If on, gdb will use displaced stepping to step over breakpoints if the\n\
6681 target architecture supports it.  If off, gdb will not use displaced\n\
6682 stepping to step over breakpoints, even if the target architecture\n\
6683 supports it.  If auto (which is the default), gdb will use displaced\n\
6684 stepping if the target architecture supports it and non-stop mode is\n\
6685 active, but will not use it in all-stop mode (see help set non-stop)."),
6686 NULL,
6687 show_can_use_displaced_stepping,
6688 &setlist, &showlist);
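/* Displaced stepping works by copying the instruction under a
   breakpoint to a scratch area, single-stepping the copy there, and
   then fixing up the thread's state, so the breakpoint never has to be
   removed while other threads are running.  */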
6689
6690 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6691 &exec_direction, _("Set direction of execution.\n\
6692 Options are 'forward' or 'reverse'."),
6693 _("Show direction of execution (forward/reverse)."),
6694 _("Tells gdb whether to execute forward or backward."),
6695 set_exec_direction_func, show_exec_direction_func,
6696 &setlist, &showlist);
6697
6698 /* Set/show detach-on-fork: user-settable mode. */
6699
6700 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6701 Set whether gdb will detach the child of a fork."), _("\
6702 Show whether gdb will detach the child of a fork."), _("\
6703 If on (the default), gdb detaches the unfollowed process after a fork; if off, it keeps control of both."),
6704 NULL, NULL, &setlist, &showlist);
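/* The knobs above all follow the same add_setshow_*_cmd pattern.  As a
   minimal sketch (the setting name "example-flag" and the variable
   example_flag are hypothetical, not part of gdb), a new boolean
   setting would be registered like this:

     static int example_flag = 0;

     add_setshow_boolean_cmd ("example-flag", class_run, &example_flag, _("\
   Set the example flag."), _("\
   Show the example flag."), _("\
   When on, ... describe the behavior here ..."),
                              NULL, NULL, &setlist, &showlist);  */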
6705
6706 /* Initialize the ptid constants (null_ptid: no thread; minus_one_ptid: any thread).  */
6707 null_ptid = ptid_build (0, 0, 0);
6708 minus_one_ptid = ptid_build (-1, 0, 0);
6709 inferior_ptid = null_ptid;
6710 target_last_wait_ptid = minus_one_ptid;
6711
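/* Register observers so that infrun is notified when a thread's ptid
   changes, when a stop is requested for a thread, and when a thread or
   an inferior exits.  */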
6712 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6713 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6714 observer_attach_thread_exit (infrun_thread_thread_exit);
6715 observer_attach_inferior_exit (infrun_inferior_exit);
6716
6717 /* Create the $_siginfo convenience variable explicitly rather than
6718 via lookup, since lookup would try to create a value of void type
6719 and gdbarch isn't initialized yet at this point.  We can be quite
6720 sure there isn't another convenience variable of the same name.  */
6721 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
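/* The value is computed lazily by siginfo_make_value; after the
   inferior stops for a signal, the user can inspect it with
   "print $_siginfo".  */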
6722 }