1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
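/* Editorial sketch, not part of the original file: the shape of the
   decision described above, in terms of hooks this code relies on
   (in_solib_dynsym_resolve_code and gdbarch_skip_solib_resolver are
   real; the surrounding control flow is illustrative only).

   if (in_solib_dynsym_resolve_code (pc))
     {
       CORE_ADDR resolver = gdbarch_skip_solib_resolver (gdbarch, pc);

       if (resolver != 0)
         {
           // Plant a step-resume breakpoint at RESOLVER and let the
           // inferior run; we regain control once the linker's
           // symbol-resolution code has finished.
         }
       else
         {
           // Fall back to single-stepping until the PC leaves the
           // dynamic linker's sections.
         }
     }
*/
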
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
180
181
182 /* Tables of how to react to signals; the user sets them. */
183
184 static unsigned char *signal_stop;
185 static unsigned char *signal_print;
186 static unsigned char *signal_program;
187
188 #define SET_SIGS(nsigs,sigs,flags) \
189 do { \
190 int signum = (nsigs); \
191 while (signum-- > 0) \
192 if ((sigs)[signum]) \
193 (flags)[signum] = 1; \
194 } while (0)
195
196 #define UNSET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 0; \
202 } while (0)
203
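/* Editorial sketch, not part of the original file: how these macros
   are typically driven from the "handle" command.  SIGS below is a
   scratch array with a 1 for each signal the user named; the flag
   arrays are the signal_stop/signal_print/signal_program tables
   above.  The bound TARGET_SIGNAL_LAST is the end marker of the
   target_signal enum; the particular flag settings shown are
   illustrative only.

   unsigned char sigs[TARGET_SIGNAL_LAST] = { 0 };
   sigs[TARGET_SIGNAL_INT] = 1;

   SET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_stop);      // stop when it arrives
   SET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_print);     // announce it
   UNSET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_program); // don't pass it on
*/
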
204 /* Value to pass to target_resume() to cause all threads to resume */
205
206 #define RESUME_ALL minus_one_ptid
207
208 /* Command list pointer for the "stop" placeholder. */
209
210 static struct cmd_list_element *stop_command;
211
212 /* Function inferior was in as of last step command. */
213
214 static struct symbol *step_start_function;
215
216 /* Nonzero if we want to give control to the user when we're notified
217 of shared library events by the dynamic linker. */
218 static int stop_on_solib_events;
219 static void
220 show_stop_on_solib_events (struct ui_file *file, int from_tty,
221 struct cmd_list_element *c, const char *value)
222 {
223 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
224 value);
225 }
226
227 /* Nonzero means expecting a trace trap
228 and that we should stop the inferior and return silently when it happens. */
229
230 int stop_after_trap;
231
232 /* Save register contents here when executing a "finish" command or when we
233 are about to pop a stack dummy frame, if and only if proceed_to_finish is set.
234 Thus this contains the return value from the called function (assuming
235 values are returned in a register). */
236
237 struct regcache *stop_registers;
238
239 /* Nonzero after stop if current stack frame should be printed. */
240
241 static int stop_print_frame;
242
243 /* This is a cached copy of the pid/waitstatus of the last event
244 returned by target_wait()/deprecated_target_wait_hook(). This
245 information is returned by get_last_target_status(). */
246 static ptid_t target_last_wait_ptid;
247 static struct target_waitstatus target_last_waitstatus;
248
249 static void context_switch (ptid_t ptid);
250
251 void init_thread_stepping_state (struct thread_info *tss);
252
253 void init_infwait_state (void);
254
255 static const char follow_fork_mode_child[] = "child";
256 static const char follow_fork_mode_parent[] = "parent";
257
258 static const char *follow_fork_mode_kind_names[] = {
259 follow_fork_mode_child,
260 follow_fork_mode_parent,
261 NULL
262 };
263
264 static const char *follow_fork_mode_string = follow_fork_mode_parent;
265 static void
266 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
267 struct cmd_list_element *c, const char *value)
268 {
269 fprintf_filtered (file, _("\
270 Debugger response to a program call of fork or vfork is \"%s\".\n"),
271 value);
272 }
273 \f
274
275 /* Tell the target to follow the fork we're stopped at. Returns true
276 if the inferior should be resumed; false, if the target for some
277 reason decided it's best not to resume. */
278
279 static int
280 follow_fork (void)
281 {
282 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
283 int should_resume = 1;
284 struct thread_info *tp;
285
286 /* Copy user stepping state to the new inferior thread. FIXME: the
287 followed fork child thread should have a copy of most of the
288 parent thread structure's run control related fields, not just these.
289 Initialized to avoid "may be used uninitialized" warnings from gcc. */
290 struct breakpoint *step_resume_breakpoint = NULL;
291 CORE_ADDR step_range_start = 0;
292 CORE_ADDR step_range_end = 0;
293 struct frame_id step_frame_id = { 0 };
294
295 if (!non_stop)
296 {
297 ptid_t wait_ptid;
298 struct target_waitstatus wait_status;
299
300 /* Get the last target status returned by target_wait(). */
301 get_last_target_status (&wait_ptid, &wait_status);
302
303 /* If not stopped at a fork event, then there's nothing else to
304 do. */
305 if (wait_status.kind != TARGET_WAITKIND_FORKED
306 && wait_status.kind != TARGET_WAITKIND_VFORKED)
307 return 1;
308
309 /* Check if we switched over from WAIT_PTID, since the event was
310 reported. */
311 if (!ptid_equal (wait_ptid, minus_one_ptid)
312 && !ptid_equal (inferior_ptid, wait_ptid))
313 {
314 /* We did. Switch back to WAIT_PTID thread, to tell the
315 target to follow it (in either direction). We'll
316 afterwards refuse to resume, and inform the user what
317 happened. */
318 switch_to_thread (wait_ptid);
319 should_resume = 0;
320 }
321 }
322
323 tp = inferior_thread ();
324
325 /* If there were any forks/vforks that were caught and are now to be
326 followed, then do so now. */
327 switch (tp->pending_follow.kind)
328 {
329 case TARGET_WAITKIND_FORKED:
330 case TARGET_WAITKIND_VFORKED:
331 {
332 ptid_t parent, child;
333
334 /* If the user did a next/step, etc, over a fork call,
335 preserve the stepping state in the fork child. */
336 if (follow_child && should_resume)
337 {
338 step_resume_breakpoint
339 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
340 step_range_start = tp->step_range_start;
341 step_range_end = tp->step_range_end;
342 step_frame_id = tp->step_frame_id;
343
344 /* For now, delete the parent's sr breakpoint, otherwise,
345 parent/child sr breakpoints are considered duplicates,
346 and the child version will not be installed. Remove
347 this when the breakpoints module becomes aware of
348 inferiors and address spaces. */
349 delete_step_resume_breakpoint (tp);
350 tp->step_range_start = 0;
351 tp->step_range_end = 0;
352 tp->step_frame_id = null_frame_id;
353 }
354
355 parent = inferior_ptid;
356 child = tp->pending_follow.value.related_pid;
357
358 /* Tell the target to do whatever is necessary to follow
359 either parent or child. */
360 if (target_follow_fork (follow_child))
361 {
362 /* Target refused to follow, or there's some other reason
363 we shouldn't resume. */
364 should_resume = 0;
365 }
366 else
367 {
368 /* This pending follow fork event is now handled, one way
369 or another. The previous selected thread may be gone
370 from the lists by now, but if it is still around, need
371 to clear the pending follow request. */
372 tp = find_thread_ptid (parent);
373 if (tp)
374 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
375
376 /* This makes sure we don't try to apply the "Switched
377 over from WAIT_PTID" logic above. */
378 nullify_last_target_wait_ptid ();
379
380 /* If we followed the child, switch to it... */
381 if (follow_child)
382 {
383 switch_to_thread (child);
384
385 /* ... and preserve the stepping state, in case the
386 user was stepping over the fork call. */
387 if (should_resume)
388 {
389 tp = inferior_thread ();
390 tp->step_resume_breakpoint = step_resume_breakpoint;
391 tp->step_range_start = step_range_start;
392 tp->step_range_end = step_range_end;
393 tp->step_frame_id = step_frame_id;
394 }
395 else
396 {
397 /* If we get here, it was because we're trying to
398 resume from a fork catchpoint, but, the user
399 has switched threads away from the thread that
400 forked. In that case, the resume command
401 issued is most likely not applicable to the
402 child, so just warn, and refuse to resume. */
403 warning (_("\
404 Not resuming: switched threads before following fork child.\n"));
405 }
406
407 /* Reset breakpoints in the child as appropriate. */
408 follow_inferior_reset_breakpoints ();
409 }
410 else
411 switch_to_thread (parent);
412 }
413 }
414 break;
415 case TARGET_WAITKIND_SPURIOUS:
416 /* Nothing to follow. */
417 break;
418 default:
419 internal_error (__FILE__, __LINE__,
420 "Unexpected pending_follow.kind %d\n",
421 tp->pending_follow.kind);
422 break;
423 }
424
425 return should_resume;
426 }
427
428 void
429 follow_inferior_reset_breakpoints (void)
430 {
431 struct thread_info *tp = inferior_thread ();
432
433 /* Was there a step_resume breakpoint? (There was if the user
434 did a "next" at the fork() call.) If so, explicitly reset its
435 thread number.
436
437 step_resumes are a form of bp that are made to be per-thread.
438 Since we created the step_resume bp when the parent process
439 was being debugged, and now are switching to the child process,
440 from the breakpoint package's viewpoint, that's a switch of
441 "threads". We must update the bp's notion of which thread
442 it is for, or it'll be ignored when it triggers. */
443
444 if (tp->step_resume_breakpoint)
445 breakpoint_re_set_thread (tp->step_resume_breakpoint);
446
447 /* Reinsert all breakpoints in the child. The user may have set
448 breakpoints after catching the fork, in which case those
449 were never set in the child, but only in the parent. This makes
450 sure the inserted breakpoints match the breakpoint list. */
451
452 breakpoint_re_set ();
453 insert_breakpoints ();
454 }
455
456 /* The child has exited or execed: resume the threads of the parent that the
457 user wanted to be executing. */
458
459 static int
460 proceed_after_vfork_done (struct thread_info *thread,
461 void *arg)
462 {
463 int pid = * (int *) arg;
464
465 if (ptid_get_pid (thread->ptid) == pid
466 && is_running (thread->ptid)
467 && !is_executing (thread->ptid)
468 && !thread->stop_requested
469 && thread->stop_signal == TARGET_SIGNAL_0)
470 {
471 if (debug_infrun)
472 fprintf_unfiltered (gdb_stdlog,
473 "infrun: resuming vfork parent thread %s\n",
474 target_pid_to_str (thread->ptid));
475
476 switch_to_thread (thread->ptid);
477 clear_proceed_status ();
478 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
479 }
480
481 return 0;
482 }
483
484 /* Called whenever we notice an exec or exit event, to handle
485 detaching or resuming a vfork parent. */
486
487 static void
488 handle_vfork_child_exec_or_exit (int exec)
489 {
490 struct inferior *inf = current_inferior ();
491
492 if (inf->vfork_parent)
493 {
494 int resume_parent = -1;
495
496 /* This exec or exit marks the end of the shared memory region
497 between the parent and the child. If the user wanted to
498 detach from the parent, now is the time. */
499
500 if (inf->vfork_parent->pending_detach)
501 {
502 struct thread_info *tp;
503 struct cleanup *old_chain;
504 struct program_space *pspace;
505 struct address_space *aspace;
506
507 /* follow-fork child, detach-on-fork on */
508
509 old_chain = make_cleanup_restore_current_thread ();
510
511 /* We're letting go of the parent. */
512 tp = any_live_thread_of_process (inf->vfork_parent->pid);
513 switch_to_thread (tp->ptid);
514
515 /* We're about to detach from the parent, which implicitly
516 removes breakpoints from its address space. There's a
517 catch here: we want to reuse the spaces for the child,
518 but, parent/child are still sharing the pspace at this
519 point, although the exec in reality makes the kernel give
520 the child a fresh set of new pages. The problem here is
521 that the breakpoints module, being unaware of this, would
522 likely choose the child process to write to the parent
523 address space. Swapping the child temporarily away from
524 the spaces has the desired effect. Yes, this is "sort
525 of" a hack. */
526
527 pspace = inf->pspace;
528 aspace = inf->aspace;
529 inf->aspace = NULL;
530 inf->pspace = NULL;
531
532 if (debug_infrun || info_verbose)
533 {
534 target_terminal_ours ();
535
536 if (exec)
537 fprintf_filtered (gdb_stdlog,
538 "Detaching vfork parent process %d after child exec.\n",
539 inf->vfork_parent->pid);
540 else
541 fprintf_filtered (gdb_stdlog,
542 "Detaching vfork parent process %d after child exit.\n",
543 inf->vfork_parent->pid);
544 }
545
546 target_detach (NULL, 0);
547
548 /* Put it back. */
549 inf->pspace = pspace;
550 inf->aspace = aspace;
551
552 do_cleanups (old_chain);
553 }
554 else if (exec)
555 {
556 /* We're staying attached to the parent, so, really give the
557 child a new address space. */
558 inf->pspace = add_program_space (maybe_new_address_space ());
559 inf->aspace = inf->pspace->aspace;
560 inf->removable = 1;
561 set_current_program_space (inf->pspace);
562
563 resume_parent = inf->vfork_parent->pid;
564
565 /* Break the bonds. */
566 inf->vfork_parent->vfork_child = NULL;
567 }
568 else
569 {
570 struct cleanup *old_chain;
571 struct program_space *pspace;
572
573 /* If this is a vfork child exiting, then the pspace and
574 aspaces were shared with the parent. Since we're
575 reporting the process exit, we'll be mourning all that is
576 found in the address space, and switching to null_ptid,
577 preparing to start a new inferior. But, since we don't
578 want to clobber the parent's address/program spaces, we
579 go ahead and create a new one for this exiting
580 inferior. */
581
582 /* Switch to null_ptid, so that clone_program_space doesn't want
583 to read the selected frame of a dead process. */
584 old_chain = save_inferior_ptid ();
585 inferior_ptid = null_ptid;
586
587 /* This inferior is dead, so avoid giving the breakpoints
588 module the option to write through to it (cloning a
589 program space resets breakpoints). */
590 inf->aspace = NULL;
591 inf->pspace = NULL;
592 pspace = add_program_space (maybe_new_address_space ());
593 set_current_program_space (pspace);
594 inf->removable = 1;
595 clone_program_space (pspace, inf->vfork_parent->pspace);
596 inf->pspace = pspace;
597 inf->aspace = pspace->aspace;
598
599 /* Put back inferior_ptid. We'll continue mourning this
600 inferior. */
601 do_cleanups (old_chain);
602
603 resume_parent = inf->vfork_parent->pid;
604 /* Break the bonds. */
605 inf->vfork_parent->vfork_child = NULL;
606 }
607
608 inf->vfork_parent = NULL;
609
610 gdb_assert (current_program_space == inf->pspace);
611
612 if (non_stop && resume_parent != -1)
613 {
614 /* If the user wanted the parent to be running, let it go
615 free now. */
616 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
617
618 if (debug_infrun)
619 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
620 resume_parent);
621
622 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
623
624 do_cleanups (old_chain);
625 }
626 }
627 }
628
629 /* Enum strings for "set|show follow-exec-mode". */
630
631 static const char follow_exec_mode_new[] = "new";
632 static const char follow_exec_mode_same[] = "same";
633 static const char *follow_exec_mode_names[] =
634 {
635 follow_exec_mode_new,
636 follow_exec_mode_same,
637 NULL,
638 };
639
640 static const char *follow_exec_mode_string = follow_exec_mode_same;
641 static void
642 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
643 struct cmd_list_element *c, const char *value)
644 {
645 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
646 }
647
648 /* EXECD_PATHNAME is assumed to be non-NULL. */
649
650 static void
651 follow_exec (ptid_t pid, char *execd_pathname)
652 {
653 struct thread_info *th = inferior_thread ();
654 struct inferior *inf = current_inferior ();
655
656 /* This is an exec event that we actually wish to pay attention to.
657 Refresh our symbol table to the newly exec'd program, remove any
658 momentary bp's, etc.
659
660 If there are breakpoints, they aren't really inserted now,
661 since the exec() transformed our inferior into a fresh set
662 of instructions.
663
664 We want to preserve symbolic breakpoints on the list, since
665 we have hopes that they can be reset after the new a.out's
666 symbol table is read.
667
668 However, any "raw" breakpoints must be removed from the list
669 (e.g., the solib bp's), since their address is probably invalid
670 now.
671
672 And, we DON'T want to call delete_breakpoints() here, since
673 that may write the bp's "shadow contents" (the instruction
674 value that was overwritten with a TRAP instruction). Since
675 we now have a new a.out, those shadow contents aren't valid. */
676
677 mark_breakpoints_out ();
678
679 update_breakpoints_after_exec ();
680
681 /* If there was one, it's gone now. We cannot truly step-to-next
682 statement through an exec(). */
683 th->step_resume_breakpoint = NULL;
684 th->step_range_start = 0;
685 th->step_range_end = 0;
686
687 /* The target reports the exec event to the main thread, even if
688 some other thread does the exec, and even if the main thread was
689 already stopped --- if debugging in non-stop mode, it's possible
690 the user had the main thread held stopped in the previous image
691 --- release it now. This is the same behavior as step-over-exec
692 with scheduler-locking on in all-stop mode. */
693 th->stop_requested = 0;
694
695 /* What is this a.out's name? */
696 printf_unfiltered (_("%s is executing new program: %s\n"),
697 target_pid_to_str (inferior_ptid),
698 execd_pathname);
699
700 /* We've followed the inferior through an exec. Therefore, the
701 inferior has essentially been killed & reborn. */
702
703 gdb_flush (gdb_stdout);
704
705 breakpoint_init_inferior (inf_execd);
706
707 if (gdb_sysroot && *gdb_sysroot)
708 {
709 char *name = alloca (strlen (gdb_sysroot)
710 + strlen (execd_pathname)
711 + 1);
712
713 strcpy (name, gdb_sysroot);
714 strcat (name, execd_pathname);
715 execd_pathname = name;
716 }
717
718 /* Reset the shared library package. This ensures that we get a
719 shlib event when the child reaches "_start", at which point the
720 dld will have had a chance to initialize the child. */
721 /* Also, loading a symbol file below may trigger symbol lookups, and
722 we don't want those to be satisfied by the libraries of the
723 previous incarnation of this process. */
724 no_shared_libraries (NULL, 0);
725
726 if (follow_exec_mode_string == follow_exec_mode_new)
727 {
728 struct program_space *pspace;
729
730 /* The user wants to keep the old inferior and program spaces
731 around. Create a new fresh one, and switch to it. */
732
733 inf = add_inferior (current_inferior ()->pid);
734 pspace = add_program_space (maybe_new_address_space ());
735 inf->pspace = pspace;
736 inf->aspace = pspace->aspace;
737
738 exit_inferior_num_silent (current_inferior ()->num);
739
740 set_current_inferior (inf);
741 set_current_program_space (pspace);
742 }
743
744 gdb_assert (current_program_space == inf->pspace);
745
746 /* That a.out is now the one to use. */
747 exec_file_attach (execd_pathname, 0);
748
749 /* Load the main file's symbols. */
750 symbol_file_add_main (execd_pathname, 0);
751
752 #ifdef SOLIB_CREATE_INFERIOR_HOOK
753 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
754 #else
755 solib_create_inferior_hook (0);
756 #endif
757
758 jit_inferior_created_hook ();
759
760 /* Reinsert all breakpoints. (Those which were symbolic have
761 been reset to the proper address in the new a.out, thanks
762 to symbol_file_command...) */
763 insert_breakpoints ();
764
765 /* The next resume of this inferior should bring it to the shlib
766 startup breakpoints. (If the user had also set bp's on
767 "main" from the old (parent) process, then they'll auto-
768 matically get reset there in the new process.) */
769 }
770
771 /* Non-zero if we are just simulating a single-step. This is needed
772 because we cannot remove the breakpoints in the inferior process
773 until after the `wait' in `wait_for_inferior'. */
774 static int singlestep_breakpoints_inserted_p = 0;
775
776 /* The thread we inserted single-step breakpoints for. */
777 static ptid_t singlestep_ptid;
778
779 /* PC when we started this single-step. */
780 static CORE_ADDR singlestep_pc;
781
782 /* If another thread hit the singlestep breakpoint, we save the original
783 thread here so that we can resume single-stepping it later. */
784 static ptid_t saved_singlestep_ptid;
785 static int stepping_past_singlestep_breakpoint;
786
787 /* If not equal to null_ptid, this means that after stepping over breakpoint
788 is finished, we need to switch to deferred_step_ptid, and step it.
789
790 The use case is when one thread has hit a breakpoint, and then the user
791 has switched to another thread and issued 'step'. We need to step over
792 breakpoint in the thread which hit the breakpoint, but then continue
793 stepping the thread user has selected. */
794 static ptid_t deferred_step_ptid;
795 \f
796 /* Displaced stepping. */
797
798 /* In non-stop debugging mode, we must take special care to manage
799 breakpoints properly; in particular, the traditional strategy for
800 stepping a thread past a breakpoint it has hit is unsuitable.
801 'Displaced stepping' is a tactic for stepping one thread past a
802 breakpoint it has hit while ensuring that other threads running
803 concurrently will hit the breakpoint as they should.
804
805 The traditional way to step a thread T off a breakpoint in a
806 multi-threaded program in all-stop mode is as follows:
807
808 a0) Initially, all threads are stopped, and breakpoints are not
809 inserted.
810 a1) We single-step T, leaving breakpoints uninserted.
811 a2) We insert breakpoints, and resume all threads.
812
813 In non-stop debugging, however, this strategy is unsuitable: we
814 don't want to have to stop all threads in the system in order to
815 continue or step T past a breakpoint. Instead, we use displaced
816 stepping:
817
818 n0) Initially, T is stopped, other threads are running, and
819 breakpoints are inserted.
820 n1) We copy the instruction "under" the breakpoint to a separate
821 location, outside the main code stream, making any adjustments
822 to the instruction, register, and memory state as directed by
823 T's architecture.
824 n2) We single-step T over the instruction at its new location.
825 n3) We adjust the resulting register and memory state as directed
826 by T's architecture. This includes resetting T's PC to point
827 back into the main instruction stream.
828 n4) We resume T.
829
830 This approach depends on the following gdbarch methods:
831
832 - gdbarch_max_insn_length and gdbarch_displaced_step_location
833 indicate where to copy the instruction, and how much space must
834 be reserved there. We use these in step n1.
835
836 - gdbarch_displaced_step_copy_insn copies an instruction to a new
837 address, and makes any necessary adjustments to the instruction,
838 register contents, and memory. We use this in step n1.
839
840 - gdbarch_displaced_step_fixup adjusts registers and memory after
841 we have successfully single-stepped the instruction, to yield the
842 same effect the instruction would have had if we had executed it
843 at its original address. We use this in step n3.
844
845 - gdbarch_displaced_step_free_closure provides cleanup.
846
847 The gdbarch_displaced_step_copy_insn and
848 gdbarch_displaced_step_fixup functions must be written so that
849 copying an instruction with gdbarch_displaced_step_copy_insn,
850 single-stepping across the copied instruction, and then applying
851 gdbarch_displaced_step_fixup should have the same effects on the
852 thread's memory and registers as stepping the instruction in place
853 would have. Exactly which responsibilities fall to the copy and
854 which fall to the fixup is up to the author of those functions.
855
856 See the comments in gdbarch.sh for details.
857
858 Note that displaced stepping and software single-step cannot
859 currently be used in combination, although with some care I think
860 they could be made to. Software single-step works by placing
861 breakpoints on all possible subsequent instructions; if the
862 displaced instruction is a PC-relative jump, those breakpoints
863 could fall in very strange places --- on pages that aren't
864 executable, or at addresses that are not proper instruction
865 boundaries. (We do generally let other threads run while we wait
866 to hit the software single-step breakpoint, and they might
867 encounter such a corrupted instruction.) One way to work around
868 this would be to have gdbarch_displaced_step_copy_insn fully
869 simulate the effect of PC-relative instructions (and return NULL)
870 on architectures that use software single-stepping.
871
872 In non-stop mode, we can have independent and simultaneous step
873 requests, so more than one thread may need to simultaneously step
874 over a breakpoint. The current implementation assumes there is
875 only one scratch space per process. In this case, we have to
876 serialize access to the scratch space. If thread A wants to step
877 over a breakpoint, but we are currently waiting for some other
878 thread to complete a displaced step, we leave thread A stopped and
879 place it in the displaced_step_request_queue. Whenever a displaced
880 step finishes, we pick the next thread in the queue and start a new
881 displaced step operation on it. See displaced_step_prepare and
882 displaced_step_fixup for details. */
883
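/* Editorial sketch, not part of the original file: the n1-n4 sequence
   above, condensed into pseudo-C.  The gdbarch hooks named here are
   the real ones used below; error handling, the request queue, and
   saving/restoring the scratch area contents (all handled by
   displaced_step_prepare and displaced_step_fixup) are omitted.

   CORE_ADDR original = regcache_read_pc (regcache);
   CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
   ULONGEST len = gdbarch_max_insn_length (gdbarch);

   // n1: copy the displaced instruction out of line, with any
   // architecture-specific adjustments.
   closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                               regcache);

   // n2: single-step the thread at the scratch location.
   regcache_write_pc (regcache, copy);
   target_resume (ptid, 1, TARGET_SIGNAL_0);
   // ...wait for the step to complete...

   // n3: fix up registers and memory (including the PC) as if the
   // instruction had executed at its original address.
   gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                 regcache);

   // n4: resume the thread normally.
*/
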
884 struct displaced_step_request
885 {
886 ptid_t ptid;
887 struct displaced_step_request *next;
888 };
889
890 /* Per-inferior displaced stepping state. */
891 struct displaced_step_inferior_state
892 {
893 /* Pointer to next in linked list. */
894 struct displaced_step_inferior_state *next;
895
896 /* The process this displaced step state refers to. */
897 int pid;
898
899 /* A queue of pending displaced stepping requests. One entry per
900 thread that needs to do a displaced step. */
901 struct displaced_step_request *step_request_queue;
902
903 /* If this is not null_ptid, this is the thread carrying out a
904 displaced single-step in process PID. This thread's state will
905 require fixing up once it has completed its step. */
906 ptid_t step_ptid;
907
908 /* The architecture the thread had when we stepped it. */
909 struct gdbarch *step_gdbarch;
910
911 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
912 for post-step cleanup. */
913 struct displaced_step_closure *step_closure;
914
915 /* The address of the original instruction, and the copy we
916 made. */
917 CORE_ADDR step_original, step_copy;
918
919 /* Saved contents of copy area. */
920 gdb_byte *step_saved_copy;
921 };
922
923 /* The list of states of processes involved in displaced stepping
924 presently. */
925 static struct displaced_step_inferior_state *displaced_step_inferior_states;
926
927 /* Get the displaced stepping state of process PID. */
928
929 static struct displaced_step_inferior_state *
930 get_displaced_stepping_state (int pid)
931 {
932 struct displaced_step_inferior_state *state;
933
934 for (state = displaced_step_inferior_states;
935 state != NULL;
936 state = state->next)
937 if (state->pid == pid)
938 return state;
939
940 return NULL;
941 }
942
943 /* Add a new displaced stepping state for process PID to the displaced
944 stepping state list, or return a pointer to an already existing
945 entry, if it already exists. Never returns NULL. */
946
947 static struct displaced_step_inferior_state *
948 add_displaced_stepping_state (int pid)
949 {
950 struct displaced_step_inferior_state *state;
951
952 for (state = displaced_step_inferior_states;
953 state != NULL;
954 state = state->next)
955 if (state->pid == pid)
956 return state;
957
958 state = xcalloc (1, sizeof (*state));
959 state->pid = pid;
960 state->next = displaced_step_inferior_states;
961 displaced_step_inferior_states = state;
962
963 return state;
964 }
965
966 /* Remove the displaced stepping state of process PID. */
967
968 static void
969 remove_displaced_stepping_state (int pid)
970 {
971 struct displaced_step_inferior_state *it, **prev_next_p;
972
973 gdb_assert (pid != 0);
974
975 it = displaced_step_inferior_states;
976 prev_next_p = &displaced_step_inferior_states;
977 while (it)
978 {
979 if (it->pid == pid)
980 {
981 *prev_next_p = it->next;
982 xfree (it);
983 return;
984 }
985
986 prev_next_p = &it->next;
987 it = *prev_next_p;
988 }
989 }
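/* Editorial note, not part of the original file: the PREV_NEXT_P walk
   above is the standard pointer-to-pointer idiom for unlinking from a
   singly linked list; because PREV_NEXT_P points either at the list
   head or at the previous node's `next' field, removing the head
   needs no special case.  A minimal generic sketch (assumes
   <stdlib.h> for free):

   struct node { struct node *next; int key; };

   void
   remove_key (struct node **head, int key)
   {
     struct node **p = head;

     while (*p != NULL)
       {
         if ((*p)->key == key)
           {
             struct node *victim = *p;
             *p = victim->next;   // unlink; works for head and interior nodes
             free (victim);
             return;
           }
         p = &(*p)->next;
       }
   }
*/
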
990
991 static void
992 infrun_inferior_exit (struct inferior *inf)
993 {
994 remove_displaced_stepping_state (inf->pid);
995 }
996
997 /* Enum strings for "set|show displaced-stepping". */
998
999 static const char can_use_displaced_stepping_auto[] = "auto";
1000 static const char can_use_displaced_stepping_on[] = "on";
1001 static const char can_use_displaced_stepping_off[] = "off";
1002 static const char *can_use_displaced_stepping_enum[] =
1003 {
1004 can_use_displaced_stepping_auto,
1005 can_use_displaced_stepping_on,
1006 can_use_displaced_stepping_off,
1007 NULL,
1008 };
1009
1010 /* If ON, and the architecture supports it, GDB will use displaced
1011 stepping to step over breakpoints. If OFF, or if the architecture
1012 doesn't support it, GDB will instead use the traditional
1013 hold-and-step approach. If AUTO (which is the default), GDB will
1014 decide which technique to use to step over breakpoints depending on
1015 which of all-stop or non-stop mode is active --- displaced stepping
1016 in non-stop mode; hold-and-step in all-stop mode. */
1017
1018 static const char *can_use_displaced_stepping =
1019 can_use_displaced_stepping_auto;
1020
1021 static void
1022 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1023 struct cmd_list_element *c,
1024 const char *value)
1025 {
1026 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1027 fprintf_filtered (file, _("\
1028 Debugger's willingness to use displaced stepping to step over \
1029 breakpoints is %s (currently %s).\n"),
1030 value, non_stop ? "on" : "off");
1031 else
1032 fprintf_filtered (file, _("\
1033 Debugger's willingness to use displaced stepping to step over \
1034 breakpoints is %s.\n"), value);
1035 }
1036
1037 /* Return non-zero if displaced stepping can/should be used to step
1038 over breakpoints. */
1039
1040 static int
1041 use_displaced_stepping (struct gdbarch *gdbarch)
1042 {
1043 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1044 && non_stop)
1045 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1046 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1047 && !RECORD_IS_USED);
1048 }
1049
1050 /* Clean out any stray displaced stepping state. */
1051 static void
1052 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1053 {
1054 /* Indicate that there is no cleanup pending. */
1055 displaced->step_ptid = null_ptid;
1056
1057 if (displaced->step_closure)
1058 {
1059 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1060 displaced->step_closure);
1061 displaced->step_closure = NULL;
1062 }
1063 }
1064
1065 static void
1066 displaced_step_clear_cleanup (void *arg)
1067 {
1068 struct displaced_step_inferior_state *state = arg;
1069
1070 displaced_step_clear (state);
1071 }
1072
1073 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1074 void
1075 displaced_step_dump_bytes (struct ui_file *file,
1076 const gdb_byte *buf,
1077 size_t len)
1078 {
1079 int i;
1080
1081 for (i = 0; i < len; i++)
1082 fprintf_unfiltered (file, "%02x ", buf[i]);
1083 fputs_unfiltered ("\n", file);
1084 }
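/* Editorial sketch, not part of the original file: typical use of
   displaced_step_dump_bytes when tracing the scratch area.  The byte
   values are made up; the function prints each byte as two hex
   digits followed by a space, then a newline.

   gdb_byte insn[] = { 0x48, 0x89, 0xe5 };

   fprintf_unfiltered (gdb_stdlog, "displaced: insn bytes: ");
   displaced_step_dump_bytes (gdb_stdlog, insn, sizeof (insn));
*/
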
1085
1086 /* Prepare to single-step, using displaced stepping.
1087
1088 Note that we cannot use displaced stepping when we have a signal to
1089 deliver. If we have a signal to deliver and an instruction to step
1090 over, then after the step, there will be no indication from the
1091 target whether the thread entered a signal handler or ignored the
1092 signal and stepped over the instruction successfully --- both cases
1093 result in a simple SIGTRAP. In the first case we mustn't do a
1094 fixup, and in the second case we must --- but we can't tell which.
1095 Comments in the code for 'random signals' in handle_inferior_event
1096 explain how we handle this case instead.
1097
1098 Returns 1 if preparing was successful -- this thread is going to be
1099 stepped now; or 0 if displaced stepping this thread got queued. */
1100 static int
1101 displaced_step_prepare (ptid_t ptid)
1102 {
1103 struct cleanup *old_cleanups, *ignore_cleanups;
1104 struct regcache *regcache = get_thread_regcache (ptid);
1105 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1106 CORE_ADDR original, copy;
1107 ULONGEST len;
1108 struct displaced_step_closure *closure;
1109 struct displaced_step_inferior_state *displaced;
1110
1111 /* We should never reach this function if the architecture does not
1112 support displaced stepping. */
1113 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1114
1115 /* We have to displaced step one thread at a time, as we only have
1116 access to a single scratch space per inferior. */
1117
1118 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1119
1120 if (!ptid_equal (displaced->step_ptid, null_ptid))
1121 {
1122 /* Already waiting for a displaced step to finish. Defer this
1123 request and place it in the queue. */
1124 struct displaced_step_request *req, *new_req;
1125
1126 if (debug_displaced)
1127 fprintf_unfiltered (gdb_stdlog,
1128 "displaced: defering step of %s\n",
1129 target_pid_to_str (ptid));
1130
1131 new_req = xmalloc (sizeof (*new_req));
1132 new_req->ptid = ptid;
1133 new_req->next = NULL;
1134
1135 if (displaced->step_request_queue)
1136 {
1137 for (req = displaced->step_request_queue;
1138 req && req->next;
1139 req = req->next)
1140 ;
1141 req->next = new_req;
1142 }
1143 else
1144 displaced->step_request_queue = new_req;
1145
1146 return 0;
1147 }
1148 else
1149 {
1150 if (debug_displaced)
1151 fprintf_unfiltered (gdb_stdlog,
1152 "displaced: stepping %s now\n",
1153 target_pid_to_str (ptid));
1154 }
1155
1156 displaced_step_clear (displaced);
1157
1158 old_cleanups = save_inferior_ptid ();
1159 inferior_ptid = ptid;
1160
1161 original = regcache_read_pc (regcache);
1162
1163 copy = gdbarch_displaced_step_location (gdbarch);
1164 len = gdbarch_max_insn_length (gdbarch);
1165
1166 /* Save the original contents of the copy area. */
1167 displaced->step_saved_copy = xmalloc (len);
1168 ignore_cleanups = make_cleanup (free_current_contents,
1169 &displaced->step_saved_copy);
1170 read_memory (copy, displaced->step_saved_copy, len);
1171 if (debug_displaced)
1172 {
1173 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1174 paddress (gdbarch, copy));
1175 displaced_step_dump_bytes (gdb_stdlog,
1176 displaced->step_saved_copy,
1177 len);
1178 };
1179
1180 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1181 original, copy, regcache);
1182
1183 /* We don't support the fully-simulated case at present. */
1184 gdb_assert (closure);
1185
1186 /* Save the information we need to fix things up if the step
1187 succeeds. */
1188 displaced->step_ptid = ptid;
1189 displaced->step_gdbarch = gdbarch;
1190 displaced->step_closure = closure;
1191 displaced->step_original = original;
1192 displaced->step_copy = copy;
1193
1194 make_cleanup (displaced_step_clear_cleanup, displaced);
1195
1196 /* Resume execution at the copy. */
1197 regcache_write_pc (regcache, copy);
1198
1199 discard_cleanups (ignore_cleanups);
1200
1201 do_cleanups (old_cleanups);
1202
1203 if (debug_displaced)
1204 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1205 paddress (gdbarch, copy));
1206
1207 return 1;
1208 }
1209
1210 static void
1211 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1212 {
1213 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1214
1215 inferior_ptid = ptid;
1216 write_memory (memaddr, myaddr, len);
1217 do_cleanups (ptid_cleanup);
1218 }
1219
1220 static void
1221 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1222 {
1223 struct cleanup *old_cleanups;
1224 struct displaced_step_inferior_state *displaced
1225 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1226
1227 /* Was any thread of this process doing a displaced step? */
1228 if (displaced == NULL)
1229 return;
1230
1231 /* Was this event for the pid we displaced? */
1232 if (ptid_equal (displaced->step_ptid, null_ptid)
1233 || ! ptid_equal (displaced->step_ptid, event_ptid))
1234 return;
1235
1236 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1237
1238 /* Restore the contents of the copy area. */
1239 {
1240 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1241
1242 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1243 displaced->step_saved_copy, len);
1244 if (debug_displaced)
1245 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1246 paddress (displaced->step_gdbarch,
1247 displaced->step_copy));
1248 }
1249
1250 /* Did the instruction complete successfully? */
1251 if (signal == TARGET_SIGNAL_TRAP)
1252 {
1253 /* Fix up the resulting state. */
1254 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1255 displaced->step_closure,
1256 displaced->step_original,
1257 displaced->step_copy,
1258 get_thread_regcache (displaced->step_ptid));
1259 }
1260 else
1261 {
1262 /* Since the instruction didn't complete, all we can do is
1263 relocate the PC. */
1264 struct regcache *regcache = get_thread_regcache (event_ptid);
1265 CORE_ADDR pc = regcache_read_pc (regcache);
1266
1267 pc = displaced->step_original + (pc - displaced->step_copy);
1268 regcache_write_pc (regcache, pc);
1269 }
1270
1271 do_cleanups (old_cleanups);
1272
1273 displaced->step_ptid = null_ptid;
1274
1275 /* Are there any pending displaced stepping requests? If so, run
1276 one now. Leave the state object around, since we're likely to
1277 need it again soon. */
1278 while (displaced->step_request_queue)
1279 {
1280 struct displaced_step_request *head;
1281 ptid_t ptid;
1282 struct regcache *regcache;
1283 struct gdbarch *gdbarch;
1284 CORE_ADDR actual_pc;
1285 struct address_space *aspace;
1286
1287 head = displaced->step_request_queue;
1288 ptid = head->ptid;
1289 displaced->step_request_queue = head->next;
1290 xfree (head);
1291
1292 context_switch (ptid);
1293
1294 regcache = get_thread_regcache (ptid);
1295 actual_pc = regcache_read_pc (regcache);
1296 aspace = get_regcache_aspace (regcache);
1297
1298 if (breakpoint_here_p (aspace, actual_pc))
1299 {
1300 if (debug_displaced)
1301 fprintf_unfiltered (gdb_stdlog,
1302 "displaced: stepping queued %s now\n",
1303 target_pid_to_str (ptid));
1304
1305 displaced_step_prepare (ptid);
1306
1307 gdbarch = get_regcache_arch (regcache);
1308
1309 if (debug_displaced)
1310 {
1311 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1312 gdb_byte buf[4];
1313
1314 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1315 paddress (gdbarch, actual_pc));
1316 read_memory (actual_pc, buf, sizeof (buf));
1317 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1318 }
1319
1320 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1321 displaced->step_closure))
1322 target_resume (ptid, 1, TARGET_SIGNAL_0);
1323 else
1324 target_resume (ptid, 0, TARGET_SIGNAL_0);
1325
1326 /* Done, we're stepping a thread. */
1327 break;
1328 }
1329 else
1330 {
1331 int step;
1332 struct thread_info *tp = inferior_thread ();
1333
1334 /* The breakpoint we were sitting under has since been
1335 removed. */
1336 tp->trap_expected = 0;
1337
1338 /* Go back to what we were trying to do. */
1339 step = currently_stepping (tp);
1340
1341 if (debug_displaced)
1342 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1343 target_pid_to_str (tp->ptid), step);
1344
1345 target_resume (ptid, step, TARGET_SIGNAL_0);
1346 tp->stop_signal = TARGET_SIGNAL_0;
1347
1348 /* This request was discarded. See if there's any other
1349 thread waiting for its turn. */
1350 }
1351 }
1352 }
1353
1354 /* Update global variables holding ptids to hold NEW_PTID if they were
1355 holding OLD_PTID. */
1356 static void
1357 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1358 {
1359 struct displaced_step_request *it;
1360 struct displaced_step_inferior_state *displaced;
1361
1362 if (ptid_equal (inferior_ptid, old_ptid))
1363 inferior_ptid = new_ptid;
1364
1365 if (ptid_equal (singlestep_ptid, old_ptid))
1366 singlestep_ptid = new_ptid;
1367
1368 if (ptid_equal (deferred_step_ptid, old_ptid))
1369 deferred_step_ptid = new_ptid;
1370
1371 for (displaced = displaced_step_inferior_states;
1372 displaced;
1373 displaced = displaced->next)
1374 {
1375 if (ptid_equal (displaced->step_ptid, old_ptid))
1376 displaced->step_ptid = new_ptid;
1377
1378 for (it = displaced->step_request_queue; it; it = it->next)
1379 if (ptid_equal (it->ptid, old_ptid))
1380 it->ptid = new_ptid;
1381 }
1382 }
1383
1384 \f
1385 /* Resuming. */
1386
1387 /* Things to clean up if we QUIT out of resume (). */
1388 static void
1389 resume_cleanups (void *ignore)
1390 {
1391 normal_stop ();
1392 }
1393
1394 static const char schedlock_off[] = "off";
1395 static const char schedlock_on[] = "on";
1396 static const char schedlock_step[] = "step";
1397 static const char *scheduler_enums[] = {
1398 schedlock_off,
1399 schedlock_on,
1400 schedlock_step,
1401 NULL
1402 };
1403 static const char *scheduler_mode = schedlock_off;
1404 static void
1405 show_scheduler_mode (struct ui_file *file, int from_tty,
1406 struct cmd_list_element *c, const char *value)
1407 {
1408 fprintf_filtered (file, _("\
1409 Mode for locking scheduler during execution is \"%s\".\n"),
1410 value);
1411 }
1412
1413 static void
1414 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1415 {
1416 if (!target_can_lock_scheduler)
1417 {
1418 scheduler_mode = schedlock_off;
1419 error (_("Target '%s' cannot support this command."), target_shortname);
1420 }
1421 }
1422
1423 /* If SCHEDULER_MODE is on, then set it back to off. Warn the user
1424 about the change. */
1425
1426 void
1427 reset_schedlock (void)
1428 {
1429 if (scheduler_mode == schedlock_on)
1430 {
1431 warning ("Resetting scheduler-lock mode to 'off'");
1432 scheduler_mode = schedlock_off;
1433 }
1434 }
1435
1436 /* True if execution commands resume all threads of all processes by
1437 default; otherwise, resume only threads of the current inferior
1438 process. */
1439 int sched_multi = 0;
1440
1441 /* Try to set up software single-stepping over the specified location.
1442 Return 1 if target_resume() should use hardware single step.
1443
1444 GDBARCH the current gdbarch.
1445 PC the location to step over. */
1446
1447 static int
1448 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1449 {
1450 int hw_step = 1;
1451
1452 if (gdbarch_software_single_step_p (gdbarch)
1453 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1454 {
1455 hw_step = 0;
1456 /* Do not pull these breakpoints until after a `wait' in
1457 `wait_for_inferior' */
1458 singlestep_breakpoints_inserted_p = 1;
1459 singlestep_ptid = inferior_ptid;
1460 singlestep_pc = pc;
1461 }
1462 return hw_step;
1463 }
1464
1465 /* Resume the inferior, but allow a QUIT. This is useful if the user
1466 wants to interrupt some lengthy single-stepping operation
1467 (for child processes, the SIGINT goes to the inferior, and so
1468 we get a SIGINT random_signal, but for remote debugging and perhaps
1469 other targets, that's not true).
1470
1471 STEP nonzero if we should step (zero to continue instead).
1472 SIG is the signal to give the inferior (zero for none). */
1473 void
1474 resume (int step, enum target_signal sig)
1475 {
1476 int should_resume = 1;
1477 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1478 struct regcache *regcache = get_current_regcache ();
1479 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1480 struct thread_info *tp = inferior_thread ();
1481 CORE_ADDR pc = regcache_read_pc (regcache);
1482 struct address_space *aspace = get_regcache_aspace (regcache);
1483
1484 QUIT;
1485
1486 if (debug_infrun)
1487 fprintf_unfiltered (gdb_stdlog,
1488 "infrun: resume (step=%d, signal=%d), "
1489 "trap_expected=%d\n",
1490 step, sig, tp->trap_expected);
1491
1492 /* Normally, by the time we reach `resume', the breakpoints are either
1493 removed or inserted, as appropriate. The exception is if we're sitting
1494 at a permanent breakpoint; we need to step over it, but permanent
1495 breakpoints can't be removed. So we have to test for it here. */
1496 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1497 {
1498 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1499 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1500 else
1501 error (_("\
1502 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1503 how to step past a permanent breakpoint on this architecture. Try using\n\
1504 a command like `return' or `jump' to continue execution."));
1505 }
1506
1507 /* If enabled, step over breakpoints by executing a copy of the
1508 instruction at a different address.
1509
1510 We can't use displaced stepping when we have a signal to deliver;
1511 the comments for displaced_step_prepare explain why. The
1512 comments in the handle_inferior event for dealing with 'random
1513 signals' explain what we do instead. */
1514 if (use_displaced_stepping (gdbarch)
1515 && (tp->trap_expected
1516 || (step && gdbarch_software_single_step_p (gdbarch)))
1517 && sig == TARGET_SIGNAL_0)
1518 {
1519 struct displaced_step_inferior_state *displaced;
1520
1521 if (!displaced_step_prepare (inferior_ptid))
1522 {
1523 /* Got placed in displaced stepping queue. Will be resumed
1524 later when all the currently queued displaced stepping
1525 requests finish. The thread is not executing at this point,
1526 and the call to set_executing will be made later. But we
1527 need to call set_running here, since from frontend point of view,
1528 the thread is running. */
1529 set_running (inferior_ptid, 1);
1530 discard_cleanups (old_cleanups);
1531 return;
1532 }
1533
1534 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1535 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1536 displaced->step_closure);
1537 }
1538
1539 /* Do we need to do it the hard way, w/temp breakpoints? */
1540 else if (step)
1541 step = maybe_software_singlestep (gdbarch, pc);
1542
1543 if (should_resume)
1544 {
1545 ptid_t resume_ptid;
1546
1547 /* If STEP is set, it's a request to use hardware stepping
1548 facilities. But in that case, we should never
1549 use singlestep breakpoint. */
1550 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1551
1552 /* Decide the set of threads to ask the target to resume. Start
1553 by assuming everything will be resumed, then narrow the set
1554 by applying increasingly restrictive conditions. */
1555
1556 /* By default, resume all threads of all processes. */
1557 resume_ptid = RESUME_ALL;
1558
1559 /* Maybe resume only all threads of the current process. */
1560 if (!sched_multi && target_supports_multi_process ())
1561 {
1562 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1563 }
1564
1565 /* Maybe resume a single thread after all. */
1566 if (singlestep_breakpoints_inserted_p
1567 && stepping_past_singlestep_breakpoint)
1568 {
1569 /* The situation here is as follows. In thread T1 we wanted to
1570 single-step. Lacking hardware single-stepping we've
1571 set a breakpoint at the PC of the next instruction -- call it
1572 P. After resuming, we've hit that breakpoint in thread T2.
1573 Now we've removed the original breakpoint, inserted a breakpoint
1574 at P+1, and are trying to step to advance T2 past the breakpoint.
1575 We need to step only T2: if T1 is allowed to run freely,
1576 it can run past P, and if other threads are allowed to run,
1577 they can hit the breakpoint at P+1, and nested hits of single-step
1578 breakpoints are not something we'd want -- that's complicated
1579 to support, and has no value. */
1580 resume_ptid = inferior_ptid;
1581 }
1582 else if ((step || singlestep_breakpoints_inserted_p)
1583 && tp->trap_expected)
1584 {
1585 /* We're allowing a thread to run past a breakpoint it has
1586 hit, by single-stepping the thread with the breakpoint
1587 removed. In which case, we need to single-step only this
1588 thread, and keep others stopped, as they can miss this
1589 breakpoint if allowed to run.
1590
1591 The current code actually removes all breakpoints when
1592 doing this, not just the one being stepped over, so if we
1593 let other threads run, we can actually miss any
1594 breakpoint, not just the one at PC. */
1595 resume_ptid = inferior_ptid;
1596 }
1597 else if (non_stop)
1598 {
1599 /* With non-stop mode on, threads are always handled
1600 individually. */
1601 resume_ptid = inferior_ptid;
1602 }
1603 else if ((scheduler_mode == schedlock_on)
1604 || (scheduler_mode == schedlock_step
1605 && (step || singlestep_breakpoints_inserted_p)))
1606 {
1607 /* User-settable 'scheduler' mode requires solo thread resume. */
1608 resume_ptid = inferior_ptid;
1609 }
1610
1611 if (gdbarch_cannot_step_breakpoint (gdbarch))
1612 {
1613 /* Most targets can step a breakpoint instruction, thus
1614 executing it normally. But if this one cannot, just
1615 continue and we will hit it anyway. */
1616 if (step && breakpoint_inserted_here_p (aspace, pc))
1617 step = 0;
1618 }
1619
1620 if (debug_displaced
1621 && use_displaced_stepping (gdbarch)
1622 && tp->trap_expected)
1623 {
1624 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1625 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1626 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1627 gdb_byte buf[4];
1628
1629 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1630 paddress (resume_gdbarch, actual_pc));
1631 read_memory (actual_pc, buf, sizeof (buf));
1632 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1633 }
1634
1635 /* Install inferior's terminal modes. */
1636 target_terminal_inferior ();
1637
1638 /* Avoid confusing the next resume, if the next stop/resume
1639 happens to apply to another thread. */
1640 tp->stop_signal = TARGET_SIGNAL_0;
1641
1642 target_resume (resume_ptid, step, sig);
1643 }
1644
1645 discard_cleanups (old_cleanups);
1646 }
1647 \f
1648 /* Proceeding. */
1649
1650 /* Clear out all variables saying what to do when inferior is continued.
1651 First do this, then set the ones you want, then call `proceed'. */
1652
1653 static void
1654 clear_proceed_status_thread (struct thread_info *tp)
1655 {
1656 if (debug_infrun)
1657 fprintf_unfiltered (gdb_stdlog,
1658 "infrun: clear_proceed_status_thread (%s)\n",
1659 target_pid_to_str (tp->ptid));
1660
1661 tp->trap_expected = 0;
1662 tp->step_range_start = 0;
1663 tp->step_range_end = 0;
1664 tp->step_frame_id = null_frame_id;
1665 tp->step_stack_frame_id = null_frame_id;
1666 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1667 tp->stop_requested = 0;
1668
1669 tp->stop_step = 0;
1670
1671 tp->proceed_to_finish = 0;
1672
1673 /* Discard any remaining commands or status from previous stop. */
1674 bpstat_clear (&tp->stop_bpstat);
1675 }
1676
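/* Callback for iterate_over_threads: clear the proceed status of
   thread TP, skipping threads that have already exited.  */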
1677 static int
1678 clear_proceed_status_callback (struct thread_info *tp, void *data)
1679 {
1680 if (is_exited (tp->ptid))
1681 return 0;
1682
1683 clear_proceed_status_thread (tp);
1684 return 0;
1685 }
1686
1687 void
1688 clear_proceed_status (void)
1689 {
1690 if (!non_stop)
1691 {
1692 /* In all-stop mode, delete the per-thread status of all
1693 threads.  Even if inferior_ptid is null_ptid, there may be
1694 threads on the list.  E.g., we may be launching a new
1695 process, while selecting the executable. */
1696 iterate_over_threads (clear_proceed_status_callback, NULL);
1697 }
1698
1699 if (!ptid_equal (inferior_ptid, null_ptid))
1700 {
1701 struct inferior *inferior;
1702
1703 if (non_stop)
1704 {
1705 /* If in non-stop mode, only delete the per-thread status of
1706 the current thread. */
1707 clear_proceed_status_thread (inferior_thread ());
1708 }
1709
1710 inferior = current_inferior ();
1711 inferior->stop_soon = NO_STOP_QUIETLY;
1712 }
1713
1714 stop_after_trap = 0;
1715
1716 observer_notify_about_to_proceed ();
1717
1718 if (stop_registers)
1719 {
1720 regcache_xfree (stop_registers);
1721 stop_registers = NULL;
1722 }
1723 }
1724
1725 /* Check the current thread against the thread that reported the most recent
1726 event. If a step-over is required return TRUE and set the current thread
1727 to the old thread. Otherwise return FALSE.
1728
1729 This should be suitable for any targets that support threads. */
1730
1731 static int
1732 prepare_to_proceed (int step)
1733 {
1734 ptid_t wait_ptid;
1735 struct target_waitstatus wait_status;
1736 int schedlock_enabled;
1737
1738 /* With non-stop mode on, threads are always handled individually. */
1739 gdb_assert (! non_stop);
1740
1741 /* Get the last target status returned by target_wait(). */
1742 get_last_target_status (&wait_ptid, &wait_status);
1743
1744 /* Make sure we were stopped at a breakpoint. */
1745 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1746 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1747 && wait_status.value.sig != TARGET_SIGNAL_ILL
1748 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1749 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1750 {
1751 return 0;
1752 }
1753
1754 schedlock_enabled = (scheduler_mode == schedlock_on
1755 || (scheduler_mode == schedlock_step
1756 && step));
1757
1758 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1759 if (schedlock_enabled)
1760 return 0;
1761
1762 /* Don't switch over if we're about to resume some process
1763 other than WAIT_PTID's, and schedule-multiple is off. */
1764 if (!sched_multi
1765 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1766 return 0;
1767
1768 /* We have switched away from WAIT_PTID, the thread that reported the last event. */
1769 if (!ptid_equal (wait_ptid, minus_one_ptid)
1770 && !ptid_equal (inferior_ptid, wait_ptid))
1771 {
1772 struct regcache *regcache = get_thread_regcache (wait_ptid);
1773
1774 if (breakpoint_here_p (get_regcache_aspace (regcache),
1775 regcache_read_pc (regcache)))
1776 {
1777 /* If stepping, remember the current thread to switch back to. */
1778 if (step)
1779 deferred_step_ptid = inferior_ptid;
1780
1781 /* Switch back to the WAIT_PTID thread. */
1782 switch_to_thread (wait_ptid);
1783
1784 /* We return 1 to indicate that there is a breakpoint here,
1785 so we need to step over it before continuing to avoid
1786 hitting it straight away. */
1787 return 1;
1788 }
1789 }
1790
1791 return 0;
1792 }
1793
1794 /* Basic routine for continuing the program in various fashions.
1795
1796 ADDR is the address to resume at, or -1 for resume where stopped.
1797 SIGGNAL is the signal to give it, or 0 for none,
1798 or -1 to act according to how it stopped.
1799 STEP is nonzero if we should trap after one instruction.
1800 -1 means return after that and print nothing.
1801 You should probably set various step_... variables
1802 before calling here, if you are stepping.
1803
1804 You should call clear_proceed_status before calling proceed. */
1805
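/* A rough sketch of a typical caller -- the plain "continue" path
   amounts to roughly this (a sketch, not a verbatim copy of any one
   call site):

       clear_proceed_status ();
       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   i.e. resume at the current PC, let the thread keep whatever stop
   signal it last received, and do not single-step.  */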
1806 void
1807 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1808 {
1809 struct regcache *regcache;
1810 struct gdbarch *gdbarch;
1811 struct thread_info *tp;
1812 CORE_ADDR pc;
1813 struct address_space *aspace;
1814 int oneproc = 0;	/* If set, step one instruction before inserting breakpoints.  */
1815
1816 /* If we're stopped at a fork/vfork, follow the branch set by the
1817 "set follow-fork-mode" command; otherwise, we'll just proceed
1818 resuming the current thread. */
1819 if (!follow_fork ())
1820 {
1821 /* The target for some reason decided not to resume. */
1822 normal_stop ();
1823 return;
1824 }
1825
1826 regcache = get_current_regcache ();
1827 gdbarch = get_regcache_arch (regcache);
1828 aspace = get_regcache_aspace (regcache);
1829 pc = regcache_read_pc (regcache);
1830
1831 if (step > 0)
1832 step_start_function = find_pc_function (pc);
1833 if (step < 0)
1834 stop_after_trap = 1;
1835
1836 if (addr == (CORE_ADDR) -1)
1837 {
1838 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1839 && execution_direction != EXEC_REVERSE)
1840 /* There is a breakpoint at the address we will resume at;
1841 step one instruction before inserting breakpoints so that
1842 we do not stop right away (and report a second hit at this
1843 breakpoint).
1844
1845 Note, we don't do this in reverse, because we won't
1846 actually be executing the breakpoint insn anyway.
1847 We'll be (un-)executing the previous instruction. */
1848
1849 oneproc = 1;
1850 else if (gdbarch_single_step_through_delay_p (gdbarch)
1851 && gdbarch_single_step_through_delay (gdbarch,
1852 get_current_frame ()))
1853 /* We stepped onto an instruction that needs to be stepped
1854 again before re-inserting the breakpoint; do so. */
1855 oneproc = 1;
1856 }
1857 else
1858 {
1859 regcache_write_pc (regcache, addr);
1860 }
1861
1862 if (debug_infrun)
1863 fprintf_unfiltered (gdb_stdlog,
1864 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1865 paddress (gdbarch, addr), siggnal, step);
1866
1867 /* We're handling a live event, so make sure we're doing live
1868 debugging. If we're looking at traceframes while the target is
1869 running, we're going to need to get back to that mode after
1870 handling the event. */
1871 if (non_stop)
1872 {
1873 make_cleanup_restore_current_traceframe ();
1874 set_traceframe_number (-1);
1875 }
1876
1877 if (non_stop)
1878 /* In non-stop, each thread is handled individually. The context
1879 must already be set to the right thread here. */
1880 ;
1881 else
1882 {
1883 /* In a multi-threaded task we may select another thread and
1884 then continue or step.
1885
1886 But if the old thread was stopped at a breakpoint, it will
1887 immediately cause another breakpoint stop without any
1888 execution (i.e. it will report a breakpoint hit incorrectly).
1889 So we must step over it first.
1890
1891 prepare_to_proceed checks the current thread against the
1892 thread that reported the most recent event. If a step-over
1893 is required it returns TRUE and sets the current thread to
1894 the old thread. */
1895 if (prepare_to_proceed (step))
1896 oneproc = 1;
1897 }
1898
1899 /* prepare_to_proceed may change the current thread. */
1900 tp = inferior_thread ();
1901
1902 if (oneproc)
1903 {
1904 tp->trap_expected = 1;
1905 /* If displaced stepping is enabled, we can step over the
1906 breakpoint without hitting it, so leave all breakpoints
1907 inserted. Otherwise we need to disable all breakpoints, step
1908 one instruction, and then re-add them when that step is
1909 finished. */
1910 if (!use_displaced_stepping (gdbarch))
1911 remove_breakpoints ();
1912 }
1913
1914 /* We can insert breakpoints if we're not trying to step over one,
1915 or if we are stepping over one but we're using displaced stepping
1916 to do so. */
1917 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1918 insert_breakpoints ();
1919
1920 if (!non_stop)
1921 {
1922 /* Pass the last stop signal to the thread we're resuming,
1923 irrespective of whether the current thread is the thread that
1924 got the last event or not. This was historically GDB's
1925 behaviour before keeping a stop_signal per thread. */
1926
1927 struct thread_info *last_thread;
1928 ptid_t last_ptid;
1929 struct target_waitstatus last_status;
1930
1931 get_last_target_status (&last_ptid, &last_status);
1932 if (!ptid_equal (inferior_ptid, last_ptid)
1933 && !ptid_equal (last_ptid, null_ptid)
1934 && !ptid_equal (last_ptid, minus_one_ptid))
1935 {
1936 last_thread = find_thread_ptid (last_ptid);
1937 if (last_thread)
1938 {
1939 tp->stop_signal = last_thread->stop_signal;
1940 last_thread->stop_signal = TARGET_SIGNAL_0;
1941 }
1942 }
1943 }
1944
1945 if (siggnal != TARGET_SIGNAL_DEFAULT)
1946 tp->stop_signal = siggnal;
1947 /* If this signal should not be seen by the program,
1948 give it zero. Used for debugging signals. */
1949 else if (!signal_program[tp->stop_signal])
1950 tp->stop_signal = TARGET_SIGNAL_0;
1951
1952 annotate_starting ();
1953
1954 /* Make sure that output from GDB appears before output from the
1955 inferior. */
1956 gdb_flush (gdb_stdout);
1957
1958 /* Refresh prev_pc value just prior to resuming. This used to be
1959 done in stop_stepping, however, setting prev_pc there did not handle
1960 scenarios such as inferior function calls or returning from
1961 a function via the return command. In those cases, the prev_pc
1962 value was not set properly for subsequent commands. The prev_pc value
1963 is used to initialize the starting line number in the ecs. With an
1964 invalid value, the gdb next command ends up stopping at the position
1965 represented by the next line table entry past our start position.
1966 On platforms that generate one line table entry per line, this
1967 is not a problem. However, on the ia64, the compiler generates
1968 extraneous line table entries that do not increase the line number.
1969 When we issue the gdb next command on the ia64 after an inferior call
1970 or a return command, we often end up a few instructions forward, still
1971 within the original line we started in.
1972
1973 An attempt was made to refresh the prev_pc at the same time the
1974 execution_control_state is initialized (for instance, just before
1975 waiting for an inferior event). But this approach did not work
1976 because of platforms that use ptrace, where the pc register cannot
1977 be read unless the inferior is stopped. At that point, we are not
1978 guaranteed the inferior is stopped and so the regcache_read_pc() call
1979 can fail. Setting the prev_pc value here ensures the value is updated
1980 correctly when the inferior is stopped. */
1981 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1982
1983 /* Fill in with reasonable starting values. */
1984 init_thread_stepping_state (tp);
1985
1986 /* Reset to normal state. */
1987 init_infwait_state ();
1988
1989 /* Resume inferior. */
1990 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1991
1992 /* Wait for it to stop (if not standalone)
1993 and in any case decode why it stopped, and act accordingly. */
1994 /* Do this only if we are not using the event loop, or if the target
1995 does not support asynchronous execution. */
1996 if (!target_can_async_p ())
1997 {
1998 wait_for_inferior (0);
1999 normal_stop ();
2000 }
2001 }
2002 \f
2003
2004 /* Start remote-debugging of a machine over a serial link. */
2005
2006 void
2007 start_remote (int from_tty)
2008 {
2009 struct inferior *inferior;
2010
2011 init_wait_for_inferior ();
2012 inferior = current_inferior ();
2013 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2014
2015 /* Always go on waiting for the target, regardless of the mode. */
2016 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2017 indicate to wait_for_inferior that a target should timeout if
2018 nothing is returned (instead of just blocking). Because of this,
2019 targets expecting an immediate response need to, internally, set
2020 things up so that the target_wait() is forced to eventually
2021 timeout. */
2022 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2023 differentiate to its caller what the state of the target is after
2024 the initial open has been performed. Here we're assuming that
2025 the target has stopped. It should be possible to eventually have
2026 target_open() return to the caller an indication that the target
2027 is currently running and GDB state should be set to the same as
2028 for an async run. */
2029 wait_for_inferior (0);
2030
2031 /* Now that the inferior has stopped, do any bookkeeping like
2032 loading shared libraries. We want to do this before normal_stop,
2033 so that the displayed frame is up to date. */
2034 post_create_inferior (&current_target, from_tty);
2035
2036 normal_stop ();
2037 }
2038
2039 /* Initialize static vars when a new inferior begins. */
2040
2041 void
2042 init_wait_for_inferior (void)
2043 {
2044 /* These are meaningless until the first time through wait_for_inferior. */
2045
2046 breakpoint_init_inferior (inf_starting);
2047
2048 clear_proceed_status ();
2049
2050 stepping_past_singlestep_breakpoint = 0;
2051 deferred_step_ptid = null_ptid;
2052
2053 target_last_wait_ptid = minus_one_ptid;
2054
2055 previous_inferior_ptid = null_ptid;
2056 init_infwait_state ();
2057
2058 /* Discard any skipped inlined frames. */
2059 clear_inline_frame_state (minus_one_ptid);
2060 }
2061
2062 \f
2063 /* This enum encodes possible reasons for doing a target_wait, so that
2064 wfi can call target_wait in one place. (Ultimately the call will be
2065 moved out of the infinite loop entirely.) */
2066
2067 enum infwait_states
2068 {
2069 infwait_normal_state,
2070 infwait_thread_hop_state,
2071 infwait_step_watch_state,
2072 infwait_nonstep_watch_state
2073 };
2074
2075 /* Why did the inferior stop? Used to print the appropriate messages
2076 to the interface from within handle_inferior_event(). */
2077 enum inferior_stop_reason
2078 {
2079 /* Step, next, nexti, stepi finished. */
2080 END_STEPPING_RANGE,
2081 /* Inferior terminated by signal. */
2082 SIGNAL_EXITED,
2083 /* Inferior exited. */
2084 EXITED,
2085 /* Inferior received signal, and user asked to be notified. */
2086 SIGNAL_RECEIVED,
2087 /* Reverse execution -- target ran out of history info. */
2088 NO_HISTORY
2089 };
2090
2091 /* The PTID we'll do a target_wait on.  */
2092 ptid_t waiton_ptid;
2093
2094 /* Current inferior wait state. */
2095 enum infwait_states infwait_state;
2096
2097 /* Data to be passed around while handling an event. This data is
2098 discarded between events. */
2099 struct execution_control_state
2100 {
2101 ptid_t ptid;
2102 /* The thread that got the event, if this was a thread event; NULL
2103 otherwise. */
2104 struct thread_info *event_thread;
2105
2106 struct target_waitstatus ws;
2107 int random_signal;	/* Nonzero if the stop signal is not explained by a breakpoint GDB set.  */
2108 CORE_ADDR stop_func_start;
2109 CORE_ADDR stop_func_end;
2110 char *stop_func_name;
2111 int new_thread_event;	/* Nonzero if the event came from a thread not yet in the thread list.  */
2112 int wait_some_more;	/* Nonzero if we should wait for another event before deciding the inferior has stopped.  */
2113 };
2114
2115 static void handle_inferior_event (struct execution_control_state *ecs);
2116
2117 static void handle_step_into_function (struct gdbarch *gdbarch,
2118 struct execution_control_state *ecs);
2119 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2120 struct execution_control_state *ecs);
2121 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2122 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2123 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2124 struct symtab_and_line sr_sal,
2125 struct frame_id sr_id);
2126 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2127
2128 static void stop_stepping (struct execution_control_state *ecs);
2129 static void prepare_to_wait (struct execution_control_state *ecs);
2130 static void keep_going (struct execution_control_state *ecs);
2131 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2132 int stop_info);
2133
2134 /* Callback for iterate_over_threads.  If the thread is stopped, but
2135 the user/frontend doesn't know about that yet, go through
2136 normal_stop, as if the thread had just stopped now. ARG points at
2137 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2138 ptid_is_pid(PTID) is true, applies to all threads of the process
2139 pointed at by PTID.  Otherwise, applies only to the thread pointed to by
2140 PTID. */
2141
2142 static int
2143 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2144 {
2145 ptid_t ptid = * (ptid_t *) arg;
2146
2147 if ((ptid_equal (info->ptid, ptid)
2148 || ptid_equal (minus_one_ptid, ptid)
2149 || (ptid_is_pid (ptid)
2150 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2151 && is_running (info->ptid)
2152 && !is_executing (info->ptid))
2153 {
2154 struct cleanup *old_chain;
2155 struct execution_control_state ecss;
2156 struct execution_control_state *ecs = &ecss;
2157
2158 memset (ecs, 0, sizeof (*ecs));
2159
2160 old_chain = make_cleanup_restore_current_thread ();
2161
2162 switch_to_thread (info->ptid);
2163
2164 /* Go through handle_inferior_event/normal_stop, so we always
2165 have consistent output as if the stop event had been
2166 reported. */
2167 ecs->ptid = info->ptid;
2168 ecs->event_thread = find_thread_ptid (info->ptid);
2169 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2170 ecs->ws.value.sig = TARGET_SIGNAL_0;
2171
2172 handle_inferior_event (ecs);
2173
2174 if (!ecs->wait_some_more)
2175 {
2176 struct thread_info *tp;
2177
2178 normal_stop ();
2179
2180 /* Finish off the continuations.  The continuations
2181 themselves are responsible for realising the thread
2182 didn't finish what it was supposed to do. */
2183 tp = inferior_thread ();
2184 do_all_intermediate_continuations_thread (tp);
2185 do_all_continuations_thread (tp);
2186 }
2187
2188 do_cleanups (old_chain);
2189 }
2190
2191 return 0;
2192 }
2193
2194 /* This function is attached as a "thread_stop_requested" observer.
2195 Clean up local state that assumed the PTID was to be resumed, and
2196 report the stop to the frontend. */
2197
2198 static void
2199 infrun_thread_stop_requested (ptid_t ptid)
2200 {
2201 struct displaced_step_inferior_state *displaced;
2202
2203 /* PTID was requested to stop. Remove it from the displaced
2204 stepping queue, so we don't try to resume it automatically. */
2205
2206 for (displaced = displaced_step_inferior_states;
2207 displaced;
2208 displaced = displaced->next)
2209 {
2210 struct displaced_step_request *it, **prev_next_p;
2211
2212 it = displaced->step_request_queue;
2213 prev_next_p = &displaced->step_request_queue;
2214 while (it)
2215 {
2216 if (ptid_match (it->ptid, ptid))
2217 {
2218 *prev_next_p = it->next;
2219 it->next = NULL;
2220 xfree (it);
2221 }
2222 else
2223 {
2224 prev_next_p = &it->next;
2225 }
2226
2227 it = *prev_next_p;
2228 }
2229 }
2230
2231 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2232 }
2233
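/* Called when thread TP exits.  If TP is the thread the last
   target_wait event came from, forget that ptid so we don't keep
   referring to an exited thread.  */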
2234 static void
2235 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2236 {
2237 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2238 nullify_last_target_wait_ptid ();
2239 }
2240
2241 /* Callback for iterate_over_threads. */
2242
2243 static int
2244 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2245 {
2246 if (is_exited (info->ptid))
2247 return 0;
2248
2249 delete_step_resume_breakpoint (info);
2250 return 0;
2251 }
2252
2253 /* In all-stop, delete the step resume breakpoint of any thread that
2254 had one. In non-stop, delete the step resume breakpoint of the
2255 thread that just stopped. */
2256
2257 static void
2258 delete_step_thread_step_resume_breakpoint (void)
2259 {
2260 if (!target_has_execution
2261 || ptid_equal (inferior_ptid, null_ptid))
2262 /* If the inferior has exited, we have already deleted the step
2263 resume breakpoints out of GDB's lists. */
2264 return;
2265
2266 if (non_stop)
2267 {
2268 /* If in non-stop mode, only delete the step-resume or
2269 longjmp-resume breakpoint of the thread that just stopped
2270 stepping. */
2271 struct thread_info *tp = inferior_thread ();
2272
2273 delete_step_resume_breakpoint (tp);
2274 }
2275 else
2276 /* In all-stop mode, delete all step-resume and longjmp-resume
2277 breakpoints of any thread that had them. */
2278 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2279 }
2280
2281 /* A cleanup wrapper. */
2282
2283 static void
2284 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2285 {
2286 delete_step_thread_step_resume_breakpoint ();
2287 }
2288
2289 /* Pretty print the results of target_wait, for debugging purposes. */
2290
2291 static void
2292 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2293 const struct target_waitstatus *ws)
2294 {
2295 char *status_string = target_waitstatus_to_string (ws);
2296 struct ui_file *tmp_stream = mem_fileopen ();
2297 char *text;
2298
2299 /* The text is split over several lines because it was getting too long.
2300 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2301 output as a unit; we want only one timestamp printed if debug_timestamp
2302 is set. */
2303
2304 fprintf_unfiltered (tmp_stream,
2305 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2306 if (PIDGET (waiton_ptid) != -1)
2307 fprintf_unfiltered (tmp_stream,
2308 " [%s]", target_pid_to_str (waiton_ptid));
2309 fprintf_unfiltered (tmp_stream, ", status) =\n");
2310 fprintf_unfiltered (tmp_stream,
2311 "infrun: %d [%s],\n",
2312 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2313 fprintf_unfiltered (tmp_stream,
2314 "infrun: %s\n",
2315 status_string);
2316
2317 text = ui_file_xstrdup (tmp_stream, NULL);
2318
2319 /* This uses %s in part to handle %'s in the text, but also to avoid
2320 a gcc error: the format attribute requires a string literal. */
2321 fprintf_unfiltered (gdb_stdlog, "%s", text);
2322
2323 xfree (status_string);
2324 xfree (text);
2325 ui_file_delete (tmp_stream);
2326 }
2327
2328 /* Prepare and stabilize the inferior for detaching it. E.g.,
2329 detaching while a thread is displaced stepping is a recipe for
2330 crashing it, as nothing would readjust the PC out of the scratch
2331 pad. */
2332
2333 void
2334 prepare_for_detach (void)
2335 {
2336 struct inferior *inf = current_inferior ();
2337 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2338 struct cleanup *old_chain_1;
2339 struct displaced_step_inferior_state *displaced;
2340
2341 displaced = get_displaced_stepping_state (inf->pid);
2342
2343 /* Is any thread of this process displaced stepping? If not,
2344 there's nothing else to do. */
2345 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2346 return;
2347
2348 if (debug_infrun)
2349 fprintf_unfiltered (gdb_stdlog,
2350 "displaced-stepping in-process while detaching");
2351
2352 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2353 inf->detaching = 1;
2354
2355 while (!ptid_equal (displaced->step_ptid, null_ptid))
2356 {
2357 struct cleanup *old_chain_2;
2358 struct execution_control_state ecss;
2359 struct execution_control_state *ecs;
2360
2361 ecs = &ecss;
2362 memset (ecs, 0, sizeof (*ecs));
2363
2364 overlay_cache_invalid = 1;
2365
2366 /* We have to invalidate the registers BEFORE calling
2367 target_wait because they can be loaded from the target while
2368 in target_wait. This makes remote debugging a bit more
2369 efficient for those targets that provide critical registers
2370 as part of their normal status mechanism. */
2371
2372 registers_changed ();
2373
2374 if (deprecated_target_wait_hook)
2375 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2376 else
2377 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2378
2379 if (debug_infrun)
2380 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2381
2382 /* If an error happens while handling the event, propagate GDB's
2383 knowledge of the executing state to the frontend/user running
2384 state. */
2385 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2386
2387 /* In non-stop mode, each thread is handled individually.
2388 Switch early, so the global state is set correctly for this
2389 thread. */
2390 if (non_stop
2391 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2392 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2393 context_switch (ecs->ptid);
2394
2395 /* Now figure out what to do with the result of the wait. */
2396 handle_inferior_event (ecs);
2397
2398 /* No error, don't finish the state yet. */
2399 discard_cleanups (old_chain_2);
2400
2401 /* Breakpoints and watchpoints are not installed on the target
2402 at this point, and signals are passed directly to the
2403 inferior, so this must mean the process is gone. */
2404 if (!ecs->wait_some_more)
2405 {
2406 discard_cleanups (old_chain_1);
2407 error (_("Program exited while detaching"));
2408 }
2409 }
2410
2411 discard_cleanups (old_chain_1);
2412 }
2413
2414 /* Wait for control to return from inferior to debugger.
2415
2416 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle exec events
2417 as if they were SIGTRAP signals. This can be useful during
2418 the startup sequence on some targets such as HP/UX, where
2419 we receive an EXEC event instead of the expected SIGTRAP.
2420
2421 If the inferior gets a signal, we may decide to start it up again
2422 instead of returning. That is why there is a loop in this function.
2423 When this function actually returns, it means the inferior
2424 should be left stopped and GDB should read more commands. */
2425
2426 void
2427 wait_for_inferior (int treat_exec_as_sigtrap)
2428 {
2429 struct cleanup *old_cleanups;
2430 struct execution_control_state ecss;
2431 struct execution_control_state *ecs;
2432
2433 if (debug_infrun)
2434 fprintf_unfiltered
2435 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2436 treat_exec_as_sigtrap);
2437
2438 old_cleanups =
2439 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2440
2441 ecs = &ecss;
2442 memset (ecs, 0, sizeof (*ecs));
2443
2444 /* We'll update this if & when we switch to a new thread. */
2445 previous_inferior_ptid = inferior_ptid;
2446
2447 while (1)
2448 {
2449 struct cleanup *old_chain;
2450
2451 /* We have to invalidate the registers BEFORE calling target_wait
2452 because they can be loaded from the target while in target_wait.
2453 This makes remote debugging a bit more efficient for those
2454 targets that provide critical registers as part of their normal
2455 status mechanism. */
2456
2457 overlay_cache_invalid = 1;
2458 registers_changed ();
2459
2460 if (deprecated_target_wait_hook)
2461 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2462 else
2463 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2464
2465 if (debug_infrun)
2466 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2467
2468 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2469 {
2470 xfree (ecs->ws.value.execd_pathname);
2471 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2472 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2473 }
2474
2475 /* If an error happens while handling the event, propagate GDB's
2476 knowledge of the executing state to the frontend/user running
2477 state. */
2478 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2479
2480 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2481 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2482 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2483
2484 /* Now figure out what to do with the result of the wait. */
2485 handle_inferior_event (ecs);
2486
2487 /* No error, don't finish the state yet. */
2488 discard_cleanups (old_chain);
2489
2490 if (!ecs->wait_some_more)
2491 break;
2492 }
2493
2494 do_cleanups (old_cleanups);
2495 }
2496
2497 /* Asynchronous version of wait_for_inferior. It is called by the
2498 event loop whenever a change of state is detected on the file
2499 descriptor corresponding to the target. It can be called more than
2500 once to complete a single execution command. In such cases we need
2501 to keep the state in a global variable ECSS. If it is the last time
2502 that this function is called for a single execution command, then
2503 report to the user that the inferior has stopped, and do the
2504 necessary cleanups. */
2505
2506 void
2507 fetch_inferior_event (void *client_data)
2508 {
2509 struct execution_control_state ecss;
2510 struct execution_control_state *ecs = &ecss;
2511 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2512 struct cleanup *ts_old_chain;
2513 int was_sync = sync_execution;
2514
2515 memset (ecs, 0, sizeof (*ecs));
2516
2517 /* We'll update this if & when we switch to a new thread. */
2518 previous_inferior_ptid = inferior_ptid;
2519
2520 if (non_stop)
2521 /* In non-stop mode, the user/frontend should not notice a thread
2522 switch due to internal events.  Make sure we revert to the
2523 user-selected thread and frame after handling the event and
2524 running any breakpoint commands. */
2525 make_cleanup_restore_current_thread ();
2526
2527 /* We have to invalidate the registers BEFORE calling target_wait
2528 because they can be loaded from the target while in target_wait.
2529 This makes remote debugging a bit more efficient for those
2530 targets that provide critical registers as part of their normal
2531 status mechanism. */
2532
2533 overlay_cache_invalid = 1;
2534 registers_changed ();
2535
2536 if (deprecated_target_wait_hook)
2537 ecs->ptid =
2538 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2539 else
2540 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2541
2542 if (debug_infrun)
2543 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2544
2545 if (non_stop
2546 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2547 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2548 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2549 /* In non-stop mode, each thread is handled individually. Switch
2550 early, so the global state is set correctly for this
2551 thread. */
2552 context_switch (ecs->ptid);
2553
2554 /* If an error happens while handling the event, propagate GDB's
2555 knowledge of the executing state to the frontend/user running
2556 state. */
2557 if (!non_stop)
2558 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2559 else
2560 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2561
2562 /* Now figure out what to do with the result of the wait. */
2563 handle_inferior_event (ecs);
2564
2565 if (!ecs->wait_some_more)
2566 {
2567 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2568
2569 delete_step_thread_step_resume_breakpoint ();
2570
2571 /* We may not find an inferior if this was a process exit. */
2572 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2573 normal_stop ();
2574
2575 if (target_has_execution
2576 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2577 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2578 && ecs->event_thread->step_multi
2579 && ecs->event_thread->stop_step)
2580 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2581 else
2582 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2583 }
2584
2585 /* No error, don't finish the thread states yet. */
2586 discard_cleanups (ts_old_chain);
2587
2588 /* Revert thread and frame. */
2589 do_cleanups (old_chain);
2590
2591 /* If the inferior was in sync execution mode, and now isn't,
2592 restore the prompt. */
2593 if (was_sync && !sync_execution)
2594 display_gdb_prompt (0);
2595 }
2596
2597 /* Record the frame and location we're currently stepping through. */
2598 void
2599 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2600 {
2601 struct thread_info *tp = inferior_thread ();
2602
2603 tp->step_frame_id = get_frame_id (frame);
2604 tp->step_stack_frame_id = get_stack_frame_id (frame);
2605
2606 tp->current_symtab = sal.symtab;
2607 tp->current_line = sal.line;
2608 }
2609
2610 /* Clear context switchable stepping state. */
2611
2612 void
2613 init_thread_stepping_state (struct thread_info *tss)
2614 {
2615 tss->stepping_over_breakpoint = 0;
2616 tss->step_after_step_resume_breakpoint = 0;
2617 tss->stepping_through_solib_after_catch = 0;
2618 tss->stepping_through_solib_catchpoints = NULL;
2619 }
2620
2621 /* Return the cached copy of the last pid/waitstatus returned by
2622 target_wait()/deprecated_target_wait_hook(). The data is actually
2623 cached by handle_inferior_event(), which gets called immediately
2624 after target_wait()/deprecated_target_wait_hook(). */
2625
2626 void
2627 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2628 {
2629 *ptidp = target_last_wait_ptid;
2630 *status = target_last_waitstatus;
2631 }
2632
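/* Forget the last reported wait ptid, e.g. because the thread it
   referred to is gone.  */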
2633 void
2634 nullify_last_target_wait_ptid (void)
2635 {
2636 target_last_wait_ptid = minus_one_ptid;
2637 }
2638
2639 /* Switch thread contexts. */
2640
2641 static void
2642 context_switch (ptid_t ptid)
2643 {
2644 if (debug_infrun)
2645 {
2646 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2647 target_pid_to_str (inferior_ptid));
2648 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2649 target_pid_to_str (ptid));
2650 }
2651
2652 switch_to_thread (ptid);
2653 }
2654
2655 static void
2656 adjust_pc_after_break (struct execution_control_state *ecs)
2657 {
2658 struct regcache *regcache;
2659 struct gdbarch *gdbarch;
2660 struct address_space *aspace;
2661 CORE_ADDR breakpoint_pc;
2662
2663 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2664 we aren't, just return.
2665
2666 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2667 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2668 implemented by software breakpoints should be handled through the normal
2669 breakpoint layer.
2670
2671 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2672 different signals (SIGILL or SIGEMT for instance), but it is less
2673 clear where the PC is pointing afterwards. It may not match
2674 gdbarch_decr_pc_after_break. I don't know any specific target that
2675 generates these signals at breakpoints (the code has been in GDB since at
2676 least 1992), so I cannot guess how to handle them here.
2677
2678 In earlier versions of GDB, a target with
2679 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2680 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2681 target with both of these set in GDB history, and it seems unlikely to be
2682 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2683
2684 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2685 return;
2686
2687 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2688 return;
2689
2690 /* In reverse execution, when a breakpoint is hit, the instruction
2691 under it has already been de-executed. The reported PC always
2692 points at the breakpoint address, so adjusting it further would
2693 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2694 architecture:
2695
2696 B1 0x08000000 : INSN1
2697 B2 0x08000001 : INSN2
2698 0x08000002 : INSN3
2699 PC -> 0x08000003 : INSN4
2700
2701 Say you're stopped at 0x08000003 as above. Reverse continuing
2702 from that point should hit B2 as below. Reading the PC when the
2703 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2704 been de-executed already.
2705
2706 B1 0x08000000 : INSN1
2707 B2 PC -> 0x08000001 : INSN2
2708 0x08000002 : INSN3
2709 0x08000003 : INSN4
2710
2711 We can't apply the same logic as for forward execution, because
2712 we would wrongly adjust the PC to 0x08000000, since there's a
2713 breakpoint at PC - 1. We'd then report a hit on B1, although
2714 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2715 behaviour. */
2716 if (execution_direction == EXEC_REVERSE)
2717 return;
2718
2719 /* If this target does not decrement the PC after breakpoints, then
2720 we have nothing to do. */
2721 regcache = get_thread_regcache (ecs->ptid);
2722 gdbarch = get_regcache_arch (regcache);
2723 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2724 return;
2725
2726 aspace = get_regcache_aspace (regcache);
2727
2728 /* Find the location where (if we've hit a breakpoint) the
2729 breakpoint would be. */
2730 breakpoint_pc = regcache_read_pc (regcache)
2731 - gdbarch_decr_pc_after_break (gdbarch);
2732
2733 /* Check whether there actually is a software breakpoint inserted at
2734 that location.
2735
2736 If in non-stop mode, a race condition is possible where we've
2737 removed a breakpoint, but stop events for that breakpoint were
2738 already queued and arrive later. To suppress those spurious
2739 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2740 and retire them after a number of stop events are reported. */
2741 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2742 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2743 {
2744 struct cleanup *old_cleanups = NULL;
2745
2746 if (RECORD_IS_USED)
2747 old_cleanups = record_gdb_operation_disable_set ();
2748
2749 /* When using hardware single-step, a SIGTRAP is reported for both
2750 a completed single-step and a software breakpoint. Need to
2751 differentiate between the two, as the latter needs adjusting
2752 but the former does not.
2753
2754 The SIGTRAP can be due to a completed hardware single-step only if
2755 - we didn't insert software single-step breakpoints
2756 - the thread to be examined is still the current thread
2757 - this thread is currently being stepped
2758
2759 If any of these events did not occur, we must have stopped due
2760 to hitting a software breakpoint, and have to back up to the
2761 breakpoint address.
2762
2763 As a special case, we could have hardware single-stepped a
2764 software breakpoint. In this case (prev_pc == breakpoint_pc),
2765 we also need to back up to the breakpoint address. */
2766
2767 if (singlestep_breakpoints_inserted_p
2768 || !ptid_equal (ecs->ptid, inferior_ptid)
2769 || !currently_stepping (ecs->event_thread)
2770 || ecs->event_thread->prev_pc == breakpoint_pc)
2771 regcache_write_pc (regcache, breakpoint_pc);
2772
2773 if (RECORD_IS_USED)
2774 do_cleanups (old_cleanups);
2775 }
2776 }
2777
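/* Reset the inferior-wait bookkeeping: wait on any ptid, and go back
   to the normal wait state.  */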
2778 void
2779 init_infwait_state (void)
2780 {
2781 waiton_ptid = pid_to_ptid (-1);
2782 infwait_state = infwait_normal_state;
2783 }
2784
2785 void
2786 error_is_running (void)
2787 {
2788 error (_("\
2789 Cannot execute this command while the selected thread is running."));
2790 }
2791
2792 void
2793 ensure_not_running (void)
2794 {
2795 if (is_running (inferior_ptid))
2796 error_is_running ();
2797 }
2798
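/* Return non-zero if FRAME was entered, possibly through a chain of
   inline frames, from the frame whose id is STEP_FRAME_ID.  The walk
   outward from FRAME stops at the first non-inline frame.  */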
2799 static int
2800 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2801 {
2802 for (frame = get_prev_frame (frame);
2803 frame != NULL;
2804 frame = get_prev_frame (frame))
2805 {
2806 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2807 return 1;
2808 if (get_frame_type (frame) != INLINE_FRAME)
2809 break;
2810 }
2811
2812 return 0;
2813 }
2814
2815 /* Auxiliary function that handles syscall entry/return events.
2816 It returns 1 if the inferior should keep going (and GDB
2817 should ignore the event), or 0 if the event deserves to be
2818 processed. */
2819
2820 static int
2821 handle_syscall_event (struct execution_control_state *ecs)
2822 {
2823 struct regcache *regcache;
2824 struct gdbarch *gdbarch;
2825 int syscall_number;
2826
2827 if (!ptid_equal (ecs->ptid, inferior_ptid))
2828 context_switch (ecs->ptid);
2829
2830 regcache = get_thread_regcache (ecs->ptid);
2831 gdbarch = get_regcache_arch (regcache);
2832 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2833 stop_pc = regcache_read_pc (regcache);
2834
2835 target_last_waitstatus.value.syscall_number = syscall_number;
2836
2837 if (catch_syscall_enabled () > 0
2838 && catching_syscall_number (syscall_number) > 0)
2839 {
2840 if (debug_infrun)
2841 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2842 syscall_number);
2843
2844 ecs->event_thread->stop_bpstat
2845 = bpstat_stop_status (get_regcache_aspace (regcache),
2846 stop_pc, ecs->ptid);
2847 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2848
2849 if (!ecs->random_signal)
2850 {
2851 /* Catchpoint hit. */
2852 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2853 return 0;
2854 }
2855 }
2856
2857 /* If no catchpoint triggered for this, then keep going. */
2858 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2859 keep_going (ecs);
2860 return 1;
2861 }
2862
2863 /* Given an execution control state that has been freshly filled in
2864 by an event from the inferior, figure out what it means and take
2865 appropriate action. */
2866
2867 static void
2868 handle_inferior_event (struct execution_control_state *ecs)
2869 {
2870 struct frame_info *frame;
2871 struct gdbarch *gdbarch;
2872 int sw_single_step_trap_p = 0;
2873 int stopped_by_watchpoint;
2874 int stepped_after_stopped_by_watchpoint = 0;
2875 struct symtab_and_line stop_pc_sal;
2876 enum stop_kind stop_soon;
2877
2878 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2879 {
2880 /* We had an event in the inferior, but we are not interested in
2881 handling it at this level. The lower layers have already
2882 done what needs to be done, if anything.
2883
2884 One of the possible circumstances for this is when the
2885 inferior produces output for the console. The inferior has
2886 not stopped, and we are ignoring the event. Another possible
2887 circumstance is any event which the lower level knows will be
2888 reported multiple times without an intervening resume. */
2889 if (debug_infrun)
2890 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2891 prepare_to_wait (ecs);
2892 return;
2893 }
2894
2895 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2896 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2897 {
2898 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2899
2900 gdb_assert (inf);
2901 stop_soon = inf->stop_soon;
2902 }
2903 else
2904 stop_soon = NO_STOP_QUIETLY;
2905
2906 /* Cache the last pid/waitstatus. */
2907 target_last_wait_ptid = ecs->ptid;
2908 target_last_waitstatus = ecs->ws;
2909
2910 /* Always clear state belonging to the previous time we stopped. */
2911 stop_stack_dummy = STOP_NONE;
2912
2913 /* If it's a new thread, add it to the thread database.  */
2914
2915 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2916 && !ptid_equal (ecs->ptid, minus_one_ptid)
2917 && !in_thread_list (ecs->ptid));
2918
2919 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2920 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2921 add_thread (ecs->ptid);
2922
2923 ecs->event_thread = find_thread_ptid (ecs->ptid);
2924
2925 /* Dependent on valid ECS->EVENT_THREAD. */
2926 adjust_pc_after_break (ecs);
2927
2928 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2929 reinit_frame_cache ();
2930
2931 breakpoint_retire_moribund ();
2932
2933 /* First, distinguish signals caused by the debugger from signals
2934 that have to do with the program's own actions. Note that
2935 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2936 on the operating system version. Here we detect when a SIGILL or
2937 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2938 something similar for SIGSEGV, since a SIGSEGV will be generated
2939 when we're trying to execute a breakpoint instruction on a
2940 non-executable stack. This happens for call dummy breakpoints
2941 for architectures like SPARC that place call dummies on the
2942 stack. */
2943 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2944 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2945 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2946 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2947 {
2948 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2949
2950 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2951 regcache_read_pc (regcache)))
2952 {
2953 if (debug_infrun)
2954 fprintf_unfiltered (gdb_stdlog,
2955 "infrun: Treating signal as SIGTRAP\n");
2956 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2957 }
2958 }
2959
2960 /* Mark the non-executing threads accordingly. In all-stop, all
2961 threads of all processes are stopped when we get any event
2962 reported. In non-stop mode, only the event thread stops. If
2963 we're handling a process exit in non-stop mode, there's nothing
2964 to do, as threads of the dead process are gone, and threads of
2965 any other process were left running. */
2966 if (!non_stop)
2967 set_executing (minus_one_ptid, 0);
2968 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2969 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2970 set_executing (inferior_ptid, 0);
2971
2972 switch (infwait_state)
2973 {
2974 case infwait_thread_hop_state:
2975 if (debug_infrun)
2976 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2977 break;
2978
2979 case infwait_normal_state:
2980 if (debug_infrun)
2981 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2982 break;
2983
2984 case infwait_step_watch_state:
2985 if (debug_infrun)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "infrun: infwait_step_watch_state\n");
2988
2989 stepped_after_stopped_by_watchpoint = 1;
2990 break;
2991
2992 case infwait_nonstep_watch_state:
2993 if (debug_infrun)
2994 fprintf_unfiltered (gdb_stdlog,
2995 "infrun: infwait_nonstep_watch_state\n");
2996 insert_breakpoints ();
2997
2998 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2999 handle things like signals arriving and other things happening
3000 in combination correctly? */
3001 stepped_after_stopped_by_watchpoint = 1;
3002 break;
3003
3004 default:
3005 internal_error (__FILE__, __LINE__, _("bad switch"));
3006 }
3007
3008 infwait_state = infwait_normal_state;
3009 waiton_ptid = pid_to_ptid (-1);
3010
3011 switch (ecs->ws.kind)
3012 {
3013 case TARGET_WAITKIND_LOADED:
3014 if (debug_infrun)
3015 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3016 /* Ignore gracefully during startup of the inferior, as it might
3017 be the shell which has just loaded some objects; otherwise
3018 add the symbols for the newly loaded objects. Also ignore at
3019 the beginning of an attach or remote session; we will query
3020 the full list of libraries once the connection is
3021 established. */
3022 if (stop_soon == NO_STOP_QUIETLY)
3023 {
3024 /* Check for any newly added shared libraries if we're
3025 supposed to be adding them automatically. Switch
3026 terminal for any messages produced by
3027 breakpoint_re_set. */
3028 target_terminal_ours_for_output ();
3029 /* NOTE: cagney/2003-11-25: Make certain that the target
3030 stack's section table is kept up-to-date.  Architectures
3031 (e.g., PPC64) use the section table to perform
3032 operations such as address => section name and hence
3033 require the table to contain all sections (including
3034 those found in shared libraries). */
3035 #ifdef SOLIB_ADD
3036 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3037 #else
3038 solib_add (NULL, 0, &current_target, auto_solib_add);
3039 #endif
3040 target_terminal_inferior ();
3041
3042 /* If requested, stop when the dynamic linker notifies
3043 gdb of events. This allows the user to get control
3044 and place breakpoints in initializer routines for
3045 dynamically loaded objects (among other things). */
3046 if (stop_on_solib_events)
3047 {
3048 /* Make sure we print "Stopped due to solib-event" in
3049 normal_stop. */
3050 stop_print_frame = 1;
3051
3052 stop_stepping (ecs);
3053 return;
3054 }
3055
3056 /* NOTE drow/2007-05-11: This might be a good place to check
3057 for "catch load". */
3058 }
3059
3060 /* If we are skipping through a shell, or through shared library
3061 loading that we aren't interested in, resume the program. If
3062 we're running the program normally, also resume. But stop if
3063 we're attaching or setting up a remote connection. */
3064 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3065 {
3066 /* Loading of shared libraries might have changed breakpoint
3067 addresses. Make sure new breakpoints are inserted. */
3068 if (stop_soon == NO_STOP_QUIETLY
3069 && !breakpoints_always_inserted_mode ())
3070 insert_breakpoints ();
3071 resume (0, TARGET_SIGNAL_0);
3072 prepare_to_wait (ecs);
3073 return;
3074 }
3075
3076 break;
3077
3078 case TARGET_WAITKIND_SPURIOUS:
3079 if (debug_infrun)
3080 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3081 resume (0, TARGET_SIGNAL_0);
3082 prepare_to_wait (ecs);
3083 return;
3084
3085 case TARGET_WAITKIND_EXITED:
3086 if (debug_infrun)
3087 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3088 inferior_ptid = ecs->ptid;
3089 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3090 set_current_program_space (current_inferior ()->pspace);
3091 handle_vfork_child_exec_or_exit (0);
3092 target_terminal_ours (); /* Must do this before mourn anyway.  */
3093 print_stop_reason (EXITED, ecs->ws.value.integer);
3094
3095 /* Record the exit code in the convenience variable $_exitcode, so
3096 that the user can inspect this again later. */
3097 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3098 (LONGEST) ecs->ws.value.integer);
3099 gdb_flush (gdb_stdout);
3100 target_mourn_inferior ();
3101 singlestep_breakpoints_inserted_p = 0;
3102 stop_print_frame = 0;
3103 stop_stepping (ecs);
3104 return;
3105
3106 case TARGET_WAITKIND_SIGNALLED:
3107 if (debug_infrun)
3108 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3109 inferior_ptid = ecs->ptid;
3110 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3111 set_current_program_space (current_inferior ()->pspace);
3112 handle_vfork_child_exec_or_exit (0);
3113 stop_print_frame = 0;
3114 target_terminal_ours (); /* Must do this before mourn anyway.  */
3115
3116 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3117 reach here unless the inferior is dead. However, for years
3118 target_kill() was called here, which hints that fatal signals aren't
3119 really fatal on some systems. If that's true, then some changes
3120 may be needed. */
3121 target_mourn_inferior ();
3122
3123 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3124 singlestep_breakpoints_inserted_p = 0;
3125 stop_stepping (ecs);
3126 return;
3127
3128 /* The following are the only cases in which we keep going;
3129 the above cases end in a continue or goto. */
3130 case TARGET_WAITKIND_FORKED:
3131 case TARGET_WAITKIND_VFORKED:
3132 if (debug_infrun)
3133 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3134
3135 if (!ptid_equal (ecs->ptid, inferior_ptid))
3136 {
3137 context_switch (ecs->ptid);
3138 reinit_frame_cache ();
3139 }
3140
3141 /* Immediately detach breakpoints from the child before there's
3142 any chance of letting the user delete breakpoints from the
3143 breakpoint lists. If we don't do this early, it's easy to
3144 leave left-over traps in the child, viz: "break foo; catch
3145 fork; c; <fork>; del; c; <child calls foo>". We only follow
3146 the fork on the last `continue', and by that time the
3147 breakpoint at "foo" is long gone from the breakpoint table.
3148 If we vforked, then we don't need to unpatch here, since both
3149 parent and child are sharing the same memory pages; we'll
3150 need to unpatch at follow/detach time instead to be certain
3151 that new breakpoints added between catchpoint hit time and
3152 vfork follow are detached. */
3153 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3154 {
3155 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3156
3157 /* This won't actually modify the breakpoint list, but will
3158 physically remove the breakpoints from the child. */
3159 detach_breakpoints (child_pid);
3160 }
3161
3162 /* In case the event is caught by a catchpoint, remember that
3163 the event is to be followed at the next resume of the thread,
3164 and not immediately. */
3165 ecs->event_thread->pending_follow = ecs->ws;
3166
3167 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3168
3169 ecs->event_thread->stop_bpstat
3170 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3171 stop_pc, ecs->ptid);
3172
3173 /* Note that we're interested in knowing whether the bpstat actually
3174 causes a stop, not just whether it may explain the signal.
3175 Software watchpoints, for example, always appear in the
3176 bpstat. */
3177 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3178
3179 /* If no catchpoint triggered for this, then keep going. */
3180 if (ecs->random_signal)
3181 {
3182 ptid_t parent;
3183 ptid_t child;
3184 int should_resume;
3185 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3186
3187 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3188
3189 should_resume = follow_fork ();
3190
3191 parent = ecs->ptid;
3192 child = ecs->ws.value.related_pid;
3193
3194 /* In non-stop mode, also resume the other branch. */
3195 if (non_stop && !detach_fork)
3196 {
3197 if (follow_child)
3198 switch_to_thread (parent);
3199 else
3200 switch_to_thread (child);
3201
3202 ecs->event_thread = inferior_thread ();
3203 ecs->ptid = inferior_ptid;
3204 keep_going (ecs);
3205 }
3206
3207 if (follow_child)
3208 switch_to_thread (child);
3209 else
3210 switch_to_thread (parent);
3211
3212 ecs->event_thread = inferior_thread ();
3213 ecs->ptid = inferior_ptid;
3214
3215 if (should_resume)
3216 keep_going (ecs);
3217 else
3218 stop_stepping (ecs);
3219 return;
3220 }
3221 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3222 goto process_event_stop_test;
3223
3224 case TARGET_WAITKIND_VFORK_DONE:
3225 /* Done with the shared memory region. Re-insert breakpoints in
3226 the parent, and keep going. */
3227
3228 if (debug_infrun)
3229 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3230
3231 if (!ptid_equal (ecs->ptid, inferior_ptid))
3232 context_switch (ecs->ptid);
3233
3234 current_inferior ()->waiting_for_vfork_done = 0;
3235 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3236 /* This also takes care of reinserting breakpoints in the
3237 previously locked inferior. */
3238 keep_going (ecs);
3239 return;
3240
3241 case TARGET_WAITKIND_EXECD:
3242 if (debug_infrun)
3243 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3244
3245 if (!ptid_equal (ecs->ptid, inferior_ptid))
3246 {
3247 context_switch (ecs->ptid);
3248 reinit_frame_cache ();
3249 }
3250
3251 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3252
3253 /* Do whatever is necessary to the parent branch of the vfork. */
3254 handle_vfork_child_exec_or_exit (1);
3255
3256 /* This causes the eventpoints and symbol table to be reset.
3257 Must do this now, before trying to determine whether to
3258 stop. */
3259 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3260
3261 ecs->event_thread->stop_bpstat
3262 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3263 stop_pc, ecs->ptid);
3264 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3265
3266 /* Note that this may be referenced from inside
3267 bpstat_stop_status above, through inferior_has_execd. */
3268 xfree (ecs->ws.value.execd_pathname);
3269 ecs->ws.value.execd_pathname = NULL;
3270
3271 /* If no catchpoint triggered for this, then keep going. */
3272 if (ecs->random_signal)
3273 {
3274 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3275 keep_going (ecs);
3276 return;
3277 }
3278 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3279 goto process_event_stop_test;
3280
3281 /* Be careful not to try to gather much state about a thread
3282 that's in a syscall. It's frequently a losing proposition. */
3283 case TARGET_WAITKIND_SYSCALL_ENTRY:
3284 if (debug_infrun)
3285 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3286 /* Get the current syscall number.  */
3287 if (handle_syscall_event (ecs) != 0)
3288 return;
3289 goto process_event_stop_test;
3290
3291 /* Before examining the threads further, step this thread to
3292 get it entirely out of the syscall. (We get notice of the
3293 event when the thread is just on the verge of exiting a
3294 syscall. Stepping one instruction seems to get it back
3295 into user code.) */
3296 case TARGET_WAITKIND_SYSCALL_RETURN:
3297 if (debug_infrun)
3298 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3299 if (handle_syscall_event (ecs) != 0)
3300 return;
3301 goto process_event_stop_test;
3302
3303 case TARGET_WAITKIND_STOPPED:
3304 if (debug_infrun)
3305 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3306 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3307 break;
3308
3309 case TARGET_WAITKIND_NO_HISTORY:
3310 /* Reverse execution: target ran out of history info. */
3311 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3312 print_stop_reason (NO_HISTORY, 0);
3313 stop_stepping (ecs);
3314 return;
3315 }
3316
3317 if (ecs->new_thread_event)
3318 {
3319 if (non_stop)
3320 /* Non-stop assumes that the target handles adding new threads
3321 to the thread list. */
3322 internal_error (__FILE__, __LINE__, "\
3323 targets should add new threads to the thread list themselves in non-stop mode.");
3324
3325 /* We may want to consider not doing a resume here in order to
3326 give the user a chance to play with the new thread. It might
3327 be good to make that a user-settable option. */
3328
3329 /* At this point, all threads are stopped (happens automatically
3330 in either the OS or the native code). Therefore we need to
3331 continue all threads in order to make progress. */
3332
3333 if (!ptid_equal (ecs->ptid, inferior_ptid))
3334 context_switch (ecs->ptid);
3335 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3336 prepare_to_wait (ecs);
3337 return;
3338 }
3339
3340 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3341 {
3342 /* Do we need to clean up the state of a thread that has
3343 completed a displaced single-step? (Doing so usually affects
3344 the PC, so do it here, before we set stop_pc.) */
3345 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3346
3347 /* If we either finished a single-step or hit a breakpoint, but
3348 the user wanted this thread to be stopped, pretend we got a
3349 SIG0 (generic unsignaled stop). */
3350
3351 if (ecs->event_thread->stop_requested
3352 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3353 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3354 }
3355
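  /* Cache the PC of the thread that reported the event; most of the
     stop logic below compares STOP_PC against breakpoint locations and
     stepping ranges.  */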
3356 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3357
3358 if (debug_infrun)
3359 {
3360 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3361 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3362 struct cleanup *old_chain = save_inferior_ptid ();
3363
3364 inferior_ptid = ecs->ptid;
3365
3366 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3367 paddress (gdbarch, stop_pc));
3368 if (target_stopped_by_watchpoint ())
3369 {
3370 CORE_ADDR addr;
3371
3372 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3373
3374 if (target_stopped_data_address (&current_target, &addr))
3375 fprintf_unfiltered (gdb_stdlog,
3376 "infrun: stopped data address = %s\n",
3377 paddress (gdbarch, addr));
3378 else
3379 fprintf_unfiltered (gdb_stdlog,
3380 "infrun: (no data address available)\n");
3381 }
3382
3383 do_cleanups (old_chain);
3384 }
3385
3386 if (stepping_past_singlestep_breakpoint)
3387 {
3388 gdb_assert (singlestep_breakpoints_inserted_p);
3389 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3390 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3391
3392 stepping_past_singlestep_breakpoint = 0;
3393
3394 /* We've either finished single-stepping past the single-step
3395 breakpoint, or stopped for some other reason. It would be nice if
3396 we could tell, but we can't reliably. */
3397 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3398 {
3399 if (debug_infrun)
3400 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3401 /* Pull the single step breakpoints out of the target. */
3402 remove_single_step_breakpoints ();
3403 singlestep_breakpoints_inserted_p = 0;
3404
3405 ecs->random_signal = 0;
3406 ecs->event_thread->trap_expected = 0;
3407
3408 context_switch (saved_singlestep_ptid);
3409 if (deprecated_context_hook)
3410 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3411
3412 resume (1, TARGET_SIGNAL_0);
3413 prepare_to_wait (ecs);
3414 return;
3415 }
3416 }
3417
3418 if (!ptid_equal (deferred_step_ptid, null_ptid))
3419 {
3420 /* In non-stop mode, there's never a deferred_step_ptid set. */
3421 gdb_assert (!non_stop);
3422
3423 /* If we stopped for some other reason than single-stepping, ignore
3424 the fact that we were supposed to switch back. */
3425 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3426 {
3427 if (debug_infrun)
3428 fprintf_unfiltered (gdb_stdlog,
3429 "infrun: handling deferred step\n");
3430
3431 /* Pull the single step breakpoints out of the target. */
3432 if (singlestep_breakpoints_inserted_p)
3433 {
3434 remove_single_step_breakpoints ();
3435 singlestep_breakpoints_inserted_p = 0;
3436 }
3437
3438 /* Note: We do not call context_switch at this point, as the
3439 context is already set up for stepping the original thread. */
3440 switch_to_thread (deferred_step_ptid);
3441 deferred_step_ptid = null_ptid;
3442 /* Suppress spurious "Switching to ..." message. */
3443 previous_inferior_ptid = inferior_ptid;
3444
3445 resume (1, TARGET_SIGNAL_0);
3446 prepare_to_wait (ecs);
3447 return;
3448 }
3449
3450 deferred_step_ptid = null_ptid;
3451 }
3452
3453 /* See if a thread hit a thread-specific breakpoint that was meant for
3454 another thread. If so, then step that thread past the breakpoint,
3455 and continue it. */
3456
3457 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3458 {
3459 int thread_hop_needed = 0;
3460 struct address_space *aspace =
3461 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3462
3463 /* Check if a regular breakpoint has been hit before checking
3464 for a potential single step breakpoint. Otherwise, GDB will
3465 not see this breakpoint hit when stepping onto breakpoints. */
3466 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3467 {
3468 ecs->random_signal = 0;
3469 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3470 thread_hop_needed = 1;
3471 }
3472 else if (singlestep_breakpoints_inserted_p)
3473 {
3474 /* We have not context switched yet, so this should be true
3475 no matter which thread hit the singlestep breakpoint. */
3476 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3477 if (debug_infrun)
3478 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3479 "trap for %s\n",
3480 target_pid_to_str (ecs->ptid));
3481
3482 ecs->random_signal = 0;
3483 /* The call to in_thread_list is necessary because PTIDs sometimes
3484 change when we go from single-threaded to multi-threaded. If
3485 the singlestep_ptid is still in the list, assume that it is
3486 really different from ecs->ptid. */
3487 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3488 && in_thread_list (singlestep_ptid))
3489 {
3490 /* If the PC of the thread we were trying to single-step
3491 has changed, discard this event (which we were going
3492 to ignore anyway), and pretend we saw that thread
3493 trap. This prevents us continuously moving the
3494 single-step breakpoint forward, one instruction at a
3495 time. If the PC has changed, then the thread we were
3496 trying to single-step has trapped or been signalled,
3497 but the event has not been reported to GDB yet.
3498
3499 There might be some cases where this loses signal
3500 information, if a signal has arrived at exactly the
3501 same time that the PC changed, but this is the best
3502 we can do with the information available. Perhaps we
3503 should arrange to report all events for all threads
3504 when they stop, or to re-poll the remote looking for
3505 this particular thread (i.e. temporarily enable
3506 schedlock). */
3507
3508 CORE_ADDR new_singlestep_pc
3509 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3510
3511 if (new_singlestep_pc != singlestep_pc)
3512 {
3513 enum target_signal stop_signal;
3514
3515 if (debug_infrun)
3516 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3517 " but expected thread advanced also\n");
3518
3519 /* The current context still belongs to
3520 singlestep_ptid. Don't swap here, since that's
3521 the context we want to use. Just fudge our
3522 state and continue. */
3523 stop_signal = ecs->event_thread->stop_signal;
3524 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3525 ecs->ptid = singlestep_ptid;
3526 ecs->event_thread = find_thread_ptid (ecs->ptid);
3527 ecs->event_thread->stop_signal = stop_signal;
3528 stop_pc = new_singlestep_pc;
3529 }
3530 else
3531 {
3532 if (debug_infrun)
3533 fprintf_unfiltered (gdb_stdlog,
3534 "infrun: unexpected thread\n");
3535
3536 thread_hop_needed = 1;
3537 stepping_past_singlestep_breakpoint = 1;
3538 saved_singlestep_ptid = singlestep_ptid;
3539 }
3540 }
3541 }
3542
3543 if (thread_hop_needed)
3544 {
3545 struct regcache *thread_regcache;
3546 int remove_status = 0;
3547
3548 if (debug_infrun)
3549 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3550
3551 /* Switch context before touching inferior memory, the
3552 previous thread may have exited. */
3553 if (!ptid_equal (inferior_ptid, ecs->ptid))
3554 context_switch (ecs->ptid);
3555
3556 /* Saw a breakpoint, but it was hit by the wrong thread.
3557 Just continue. */
3558
3559 if (singlestep_breakpoints_inserted_p)
3560 {
3561 /* Pull the single step breakpoints out of the target. */
3562 remove_single_step_breakpoints ();
3563 singlestep_breakpoints_inserted_p = 0;
3564 }
3565
3566 /* If the arch can displace step, don't remove the
3567 breakpoints. */
3568 thread_regcache = get_thread_regcache (ecs->ptid);
3569 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3570 remove_status = remove_breakpoints ();
3571
3572 /* Did we fail to remove breakpoints? If so, try
3573 to set the PC past the bp. (There's at least
3574 one situation in which we can fail to remove
3575 the bp's: On HP-UX's that use ttrace, we can't
3576 change the address space of a vforking child
3577 process until the child exits (well, okay, not
3578 		 then either :-) or execs.)  */
3579 if (remove_status != 0)
3580 error (_("Cannot step over breakpoint hit in wrong thread"));
3581 else
3582 { /* Single step */
3583 if (!non_stop)
3584 {
3585 /* Only need to require the next event from this
3586 thread in all-stop mode. */
3587 waiton_ptid = ecs->ptid;
3588 infwait_state = infwait_thread_hop_state;
3589 }
3590
3591 ecs->event_thread->stepping_over_breakpoint = 1;
3592 keep_going (ecs);
3593 return;
3594 }
3595 }
3596 else if (singlestep_breakpoints_inserted_p)
3597 {
3598 sw_single_step_trap_p = 1;
3599 ecs->random_signal = 0;
3600 }
3601 }
3602 else
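    /* Stops other than SIGTRAP cannot have been caused by one of our
       breakpoints, so treat this as a random signal.  */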
3603 ecs->random_signal = 1;
3604
3605 /* See if something interesting happened to the non-current thread. If
3606 so, then switch to that thread. */
3607 if (!ptid_equal (ecs->ptid, inferior_ptid))
3608 {
3609 if (debug_infrun)
3610 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3611
3612 context_switch (ecs->ptid);
3613
3614 if (deprecated_context_hook)
3615 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3616 }
3617
3618 /* At this point, get hold of the now-current thread's frame. */
3619 frame = get_current_frame ();
3620 gdbarch = get_frame_arch (frame);
3621
3622 if (singlestep_breakpoints_inserted_p)
3623 {
3624 /* Pull the single step breakpoints out of the target. */
3625 remove_single_step_breakpoints ();
3626 singlestep_breakpoints_inserted_p = 0;
3627 }
3628
3629 if (stepped_after_stopped_by_watchpoint)
3630 stopped_by_watchpoint = 0;
3631 else
3632 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3633
3634 /* If necessary, step over this watchpoint. We'll be back to display
3635 it in a moment. */
3636 if (stopped_by_watchpoint
3637 && (target_have_steppable_watchpoint
3638 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3639 {
3640 /* At this point, we are stopped at an instruction which has
3641 attempted to write to a piece of memory under control of
3642 a watchpoint. The instruction hasn't actually executed
3643 yet. If we were to evaluate the watchpoint expression
3644 now, we would get the old value, and therefore no change
3645 would seem to have occurred.
3646
3647 In order to make watchpoints work `right', we really need
3648 to complete the memory write, and then evaluate the
3649 watchpoint expression. We do this by single-stepping the
3650 target.
3651
3652 It may not be necessary to disable the watchpoint to stop over
3653 it. For example, the PA can (with some kernel cooperation)
3654 single step over a watchpoint without disabling the watchpoint.
3655
3656 It is far more common to need to disable a watchpoint to step
3657 the inferior over it. If we have non-steppable watchpoints,
3658 we must disable the current watchpoint; it's simplest to
3659 disable all watchpoints and breakpoints. */
3660 int hw_step = 1;
3661
3662 if (!target_have_steppable_watchpoint)
3663 remove_breakpoints ();
3664 /* Single step */
3665 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3666 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3667 waiton_ptid = ecs->ptid;
3668 if (target_have_steppable_watchpoint)
3669 infwait_state = infwait_step_watch_state;
3670 else
3671 infwait_state = infwait_nonstep_watch_state;
3672 prepare_to_wait (ecs);
3673 return;
3674 }
3675
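  /* Reset the per-event stop/step bookkeeping and look up the function
     containing STOP_PC before running the stop tests below.  */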
3676 ecs->stop_func_start = 0;
3677 ecs->stop_func_end = 0;
3678 ecs->stop_func_name = 0;
3679 /* Don't care about return value; stop_func_start and stop_func_name
3680 will both be 0 if it doesn't work. */
3681 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3682 &ecs->stop_func_start, &ecs->stop_func_end);
3683 ecs->stop_func_start
3684 += gdbarch_deprecated_function_start_offset (gdbarch);
3685 ecs->event_thread->stepping_over_breakpoint = 0;
3686 bpstat_clear (&ecs->event_thread->stop_bpstat);
3687 ecs->event_thread->stop_step = 0;
3688 stop_print_frame = 1;
3689 ecs->random_signal = 0;
3690 stopped_by_random_signal = 0;
3691
3692 /* Hide inlined functions starting here, unless we just performed stepi or
3693 nexti. After stepi and nexti, always show the innermost frame (not any
3694 inline function call sites). */
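  /* (A step_range_end of 1 is how stepi/nexti is represented; see the
     explicit stepi/nexti check further below.)  */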
3695 if (ecs->event_thread->step_range_end != 1)
3696 skip_inline_frames (ecs->ptid);
3697
3698 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3699 && ecs->event_thread->trap_expected
3700 && gdbarch_single_step_through_delay_p (gdbarch)
3701 && currently_stepping (ecs->event_thread))
3702 {
3703 /* We're trying to step off a breakpoint. Turns out that we're
3704 also on an instruction that needs to be stepped multiple
3705 	 times before it has fully executed.  E.g., architectures
3706 with a delay slot. It needs to be stepped twice, once for
3707 the instruction and once for the delay slot. */
3708 int step_through_delay
3709 = gdbarch_single_step_through_delay (gdbarch, frame);
3710
3711 if (debug_infrun && step_through_delay)
3712 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3713 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3714 {
3715 /* The user issued a continue when stopped at a breakpoint.
3716 Set up for another trap and get out of here. */
3717 ecs->event_thread->stepping_over_breakpoint = 1;
3718 keep_going (ecs);
3719 return;
3720 }
3721 else if (step_through_delay)
3722 {
3723 /* The user issued a step when stopped at a breakpoint.
3724 Maybe we should stop, maybe we should not - the delay
3725 slot *might* correspond to a line of source. In any
3726 case, don't decide that here, just set
3727 ecs->stepping_over_breakpoint, making sure we
3728 single-step again before breakpoints are re-inserted. */
3729 ecs->event_thread->stepping_over_breakpoint = 1;
3730 }
3731 }
3732
3733 /* Look at the cause of the stop, and decide what to do.
3734 The alternatives are:
3735 1) stop_stepping and return; to really stop and return to the debugger,
3736 2) keep_going and return to start up again
3737 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3738 3) set ecs->random_signal to 1, and the decision between 1 and 2
3739 will be made according to the signal handling tables. */
3740
3741 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3742 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3743 || stop_soon == STOP_QUIETLY_REMOTE)
3744 {
3745 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3746 {
3747 if (debug_infrun)
3748 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3749 stop_print_frame = 0;
3750 stop_stepping (ecs);
3751 return;
3752 }
3753
3754 	  /* This originates from start_remote(), start_inferior() and
3755 	     shared library hook functions.  */
3756 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3757 {
3758 if (debug_infrun)
3759 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3760 stop_stepping (ecs);
3761 return;
3762 }
3763
3764 /* This originates from attach_command(). We need to overwrite
3765 the stop_signal here, because some kernels don't ignore a
3766 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3767 See more comments in inferior.h. On the other hand, if we
3768 get a non-SIGSTOP, report it to the user - assume the backend
3769 will handle the SIGSTOP if it should show up later.
3770
3771 Also consider that the attach is complete when we see a
3772 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3773 target extended-remote report it instead of a SIGSTOP
3774 (e.g. gdbserver). We already rely on SIGTRAP being our
3775 signal, so this is no exception.
3776
3777 Also consider that the attach is complete when we see a
3778 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3779 the target to stop all threads of the inferior, in case the
3780 low level attach operation doesn't stop them implicitly. If
3781 they weren't stopped implicitly, then the stub will report a
3782 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3783 other than GDB's request. */
3784 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3785 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3786 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3787 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3788 {
3789 stop_stepping (ecs);
3790 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3791 return;
3792 }
3793
3794 /* See if there is a breakpoint at the current PC. */
3795 ecs->event_thread->stop_bpstat
3796 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3797 stop_pc, ecs->ptid);
3798
3799 	  /* Set this again here, in case evaluating a breakpoint
3800 	     condition called a function.  */
3801 stop_print_frame = 1;
3802
3803 /* This is where we handle "moribund" watchpoints. Unlike
3804 software breakpoints traps, hardware watchpoint traps are
3805 always distinguishable from random traps. If no high-level
3806 watchpoint is associated with the reported stop data address
3807 anymore, then the bpstat does not explain the signal ---
3808 simply make sure to ignore it if `stopped_by_watchpoint' is
3809 set. */
3810
3811 if (debug_infrun
3812 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3813 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3814 && stopped_by_watchpoint)
3815 fprintf_unfiltered (gdb_stdlog, "\
3816 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3817
3818 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3819 at one stage in the past included checks for an inferior
3820 function call's call dummy's return breakpoint. The original
3821 comment, that went with the test, read:
3822
3823 ``End of a stack dummy. Some systems (e.g. Sony news) give
3824 another signal besides SIGTRAP, so check here as well as
3825 above.''
3826
3827 If someone ever tries to get call dummys on a
3828 non-executable stack to work (where the target would stop
3829 with something like a SIGSEGV), then those tests might need
3830 to be re-instated. Given, however, that the tests were only
3831 enabled when momentary breakpoints were not being used, I
3832 suspect that it won't be the case.
3833
3834 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3835 be necessary for call dummies on a non-executable stack on
3836 SPARC. */
3837
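      /* A SIGTRAP counts as random only if nothing we did explains it:
	 no breakpoint in the bpstat, no watchpoint trigger, no trap
	 expected from stepping over a breakpoint, and the thread is not
	 in the middle of stepping a range.  */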
3838 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3839 ecs->random_signal
3840 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3841 || stopped_by_watchpoint
3842 || ecs->event_thread->trap_expected
3843 || (ecs->event_thread->step_range_end
3844 && ecs->event_thread->step_resume_breakpoint == NULL));
3845 else
3846 {
3847 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3848 if (!ecs->random_signal)
3849 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3850 }
3851 }
3852
3853 /* When we reach this point, we've pretty much decided
3854 that the reason for stopping must've been a random
3855 (unexpected) signal. */
3856
3857 else
3858 ecs->random_signal = 1;
3859
3860 process_event_stop_test:
3861
3862 /* Re-fetch current thread's frame in case we did a
3863 "goto process_event_stop_test" above. */
3864 frame = get_current_frame ();
3865 gdbarch = get_frame_arch (frame);
3866
3867 /* For the program's own signals, act according to
3868 the signal handling tables. */
3869
3870 if (ecs->random_signal)
3871 {
3872 /* Signal not for debugging purposes. */
3873 int printed = 0;
3874 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3875
3876 if (debug_infrun)
3877 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3878 ecs->event_thread->stop_signal);
3879
3880 stopped_by_random_signal = 1;
3881
3882 if (signal_print[ecs->event_thread->stop_signal])
3883 {
3884 printed = 1;
3885 target_terminal_ours_for_output ();
3886 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3887 }
3888 /* Always stop on signals if we're either just gaining control
3889 of the program, or the user explicitly requested this thread
3890 to remain stopped. */
3891 if (stop_soon != NO_STOP_QUIETLY
3892 || ecs->event_thread->stop_requested
3893 || (!inf->detaching
3894 && signal_stop_state (ecs->event_thread->stop_signal)))
3895 {
3896 stop_stepping (ecs);
3897 return;
3898 }
3899 /* If not going to stop, give terminal back
3900 if we took it away. */
3901 else if (printed)
3902 target_terminal_inferior ();
3903
3904 /* Clear the signal if it should not be passed. */
3905 if (signal_program[ecs->event_thread->stop_signal] == 0)
3906 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3907
3908 if (ecs->event_thread->prev_pc == stop_pc
3909 && ecs->event_thread->trap_expected
3910 && ecs->event_thread->step_resume_breakpoint == NULL)
3911 {
3912 /* We were just starting a new sequence, attempting to
3913 single-step off of a breakpoint and expecting a SIGTRAP.
3914 Instead this signal arrives. This signal will take us out
3915 of the stepping range so GDB needs to remember to, when
3916 the signal handler returns, resume stepping off that
3917 breakpoint. */
3918 /* To simplify things, "continue" is forced to use the same
3919 code paths as single-step - set a breakpoint at the
3920 signal return address and then, once hit, step off that
3921 breakpoint. */
3922 if (debug_infrun)
3923 fprintf_unfiltered (gdb_stdlog,
3924 "infrun: signal arrived while stepping over "
3925 "breakpoint\n");
3926
3927 insert_step_resume_breakpoint_at_frame (frame);
3928 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3929 keep_going (ecs);
3930 return;
3931 }
3932
3933 if (ecs->event_thread->step_range_end != 0
3934 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3935 && (ecs->event_thread->step_range_start <= stop_pc
3936 && stop_pc < ecs->event_thread->step_range_end)
3937 && frame_id_eq (get_stack_frame_id (frame),
3938 ecs->event_thread->step_stack_frame_id)
3939 && ecs->event_thread->step_resume_breakpoint == NULL)
3940 {
3941 /* The inferior is about to take a signal that will take it
3942 out of the single step range. Set a breakpoint at the
3943 current PC (which is presumably where the signal handler
3944 will eventually return) and then allow the inferior to
3945 run free.
3946
3947 Note that this is only needed for a signal delivered
3948 while in the single-step range. Nested signals aren't a
3949 problem as they eventually all return. */
3950 if (debug_infrun)
3951 fprintf_unfiltered (gdb_stdlog,
3952 "infrun: signal may take us out of "
3953 "single-step range\n");
3954
3955 insert_step_resume_breakpoint_at_frame (frame);
3956 keep_going (ecs);
3957 return;
3958 }
3959
3960       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
3961 when either there's a nested signal, or when there's a
3962 pending signal enabled just as the signal handler returns
3963 (leaving the inferior at the step-resume-breakpoint without
3964 actually executing it). Either way continue until the
3965 breakpoint is really hit. */
3966 keep_going (ecs);
3967 return;
3968 }
3969
3970 /* Handle cases caused by hitting a breakpoint. */
3971 {
3972 CORE_ADDR jmp_buf_pc;
3973 struct bpstat_what what;
3974
3975 what = bpstat_what (ecs->event_thread->stop_bpstat);
3976
3977 if (what.call_dummy)
3978 {
3979 stop_stack_dummy = what.call_dummy;
3980 }
3981
3982 switch (what.main_action)
3983 {
3984 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3985 /* If we hit the breakpoint at longjmp while stepping, we
3986 install a momentary breakpoint at the target of the
3987 jmp_buf. */
3988
3989 if (debug_infrun)
3990 fprintf_unfiltered (gdb_stdlog,
3991 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3992
3993 ecs->event_thread->stepping_over_breakpoint = 1;
3994
3995 if (!gdbarch_get_longjmp_target_p (gdbarch)
3996 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3997 {
3998 if (debug_infrun)
3999 fprintf_unfiltered (gdb_stdlog, "\
4000 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4001 keep_going (ecs);
4002 return;
4003 }
4004
4005 /* We're going to replace the current step-resume breakpoint
4006 with a longjmp-resume breakpoint. */
4007 delete_step_resume_breakpoint (ecs->event_thread);
4008
4009 /* Insert a breakpoint at resume address. */
4010 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4011
4012 keep_going (ecs);
4013 return;
4014
4015 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4016 if (debug_infrun)
4017 fprintf_unfiltered (gdb_stdlog,
4018 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4019
4020 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4021 delete_step_resume_breakpoint (ecs->event_thread);
4022
4023 ecs->event_thread->stop_step = 1;
4024 print_stop_reason (END_STEPPING_RANGE, 0);
4025 stop_stepping (ecs);
4026 return;
4027
4028 case BPSTAT_WHAT_SINGLE:
4029 if (debug_infrun)
4030 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4031 ecs->event_thread->stepping_over_breakpoint = 1;
4032 /* Still need to check other stuff, at least the case
4033 where we are stepping and step out of the right range. */
4034 break;
4035
4036 case BPSTAT_WHAT_STOP_NOISY:
4037 if (debug_infrun)
4038 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4039 stop_print_frame = 1;
4040
4041       /* We are about to nuke the step_resume_breakpoint via the
4042 cleanup chain, so no need to worry about it here. */
4043
4044 stop_stepping (ecs);
4045 return;
4046
4047 case BPSTAT_WHAT_STOP_SILENT:
4048 if (debug_infrun)
4049 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4050 stop_print_frame = 0;
4051
4052       /* We are about to nuke the step_resume_breakpoint via the
4053 cleanup chain, so no need to worry about it here. */
4054
4055 stop_stepping (ecs);
4056 return;
4057
4058 case BPSTAT_WHAT_STEP_RESUME:
4059 if (debug_infrun)
4060 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4061
4062 delete_step_resume_breakpoint (ecs->event_thread);
4063 if (ecs->event_thread->step_after_step_resume_breakpoint)
4064 {
4065 /* Back when the step-resume breakpoint was inserted, we
4066 were trying to single-step off a breakpoint. Go back
4067 to doing that. */
4068 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4069 ecs->event_thread->stepping_over_breakpoint = 1;
4070 keep_going (ecs);
4071 return;
4072 }
4073 if (stop_pc == ecs->stop_func_start
4074 && execution_direction == EXEC_REVERSE)
4075 {
4076 /* We are stepping over a function call in reverse, and
4077 just hit the step-resume breakpoint at the start
4078 address of the function. Go back to single-stepping,
4079 which should take us back to the function call. */
4080 ecs->event_thread->stepping_over_breakpoint = 1;
4081 keep_going (ecs);
4082 return;
4083 }
4084 break;
4085
4086 case BPSTAT_WHAT_CHECK_SHLIBS:
4087 {
4088 if (debug_infrun)
4089 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4090
4091 /* Check for any newly added shared libraries if we're
4092 supposed to be adding them automatically. Switch
4093 terminal for any messages produced by
4094 breakpoint_re_set. */
4095 target_terminal_ours_for_output ();
4096 /* NOTE: cagney/2003-11-25: Make certain that the target
4097 stack's section table is kept up-to-date. Architectures,
4098 (e.g., PPC64), use the section table to perform
4099 operations such as address => section name and hence
4100 require the table to contain all sections (including
4101 those found in shared libraries). */
4102 #ifdef SOLIB_ADD
4103 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4104 #else
4105 solib_add (NULL, 0, &current_target, auto_solib_add);
4106 #endif
4107 target_terminal_inferior ();
4108
4109 /* If requested, stop when the dynamic linker notifies
4110 gdb of events. This allows the user to get control
4111 and place breakpoints in initializer routines for
4112 dynamically loaded objects (among other things). */
4113 if (stop_on_solib_events || stop_stack_dummy)
4114 {
4115 stop_stepping (ecs);
4116 return;
4117 }
4118 else
4119 {
4120 /* We want to step over this breakpoint, then keep going. */
4121 ecs->event_thread->stepping_over_breakpoint = 1;
4122 break;
4123 }
4124 }
4125 break;
4126
4127 case BPSTAT_WHAT_CHECK_JIT:
4128 if (debug_infrun)
4129 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4130
4131 /* Switch terminal for any messages produced by breakpoint_re_set. */
4132 target_terminal_ours_for_output ();
4133
4134 jit_event_handler (gdbarch);
4135
4136 target_terminal_inferior ();
4137
4138 /* We want to step over this breakpoint, then keep going. */
4139 ecs->event_thread->stepping_over_breakpoint = 1;
4140
4141 break;
4142
4143 case BPSTAT_WHAT_LAST:
4144 /* Not a real code, but listed here to shut up gcc -Wall. */
4145
4146 case BPSTAT_WHAT_KEEP_CHECKING:
4147 break;
4148 }
4149 }
4150
4151 /* We come here if we hit a breakpoint but should not
4152 stop for it. Possibly we also were stepping
4153 and should stop for that. So fall through and
4154 test for stepping. But, if not stepping,
4155 do not stop. */
4156
4157 /* In all-stop mode, if we're currently stepping but have stopped in
4158 some other thread, we need to switch back to the stepped thread. */
4159 if (!non_stop)
4160 {
4161 struct thread_info *tp;
4162
4163 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4164 ecs->event_thread);
4165 if (tp)
4166 {
4167 /* However, if the current thread is blocked on some internal
4168 breakpoint, and we simply need to step over that breakpoint
4169 to get it going again, do that first. */
4170 if ((ecs->event_thread->trap_expected
4171 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4172 || ecs->event_thread->stepping_over_breakpoint)
4173 {
4174 keep_going (ecs);
4175 return;
4176 }
4177
4178 /* If the stepping thread exited, then don't try to switch
4179 back and resume it, which could fail in several different
4180 ways depending on the target. Instead, just keep going.
4181
4182 We can find a stepping dead thread in the thread list in
4183 two cases:
4184
4185 - The target supports thread exit events, and when the
4186 target tries to delete the thread from the thread list,
4187 inferior_ptid pointed at the exiting thread. In such
4188 case, calling delete_thread does not really remove the
4189 thread from the list; instead, the thread is left listed,
4190 with 'exited' state.
4191
4192 - The target's debug interface does not support thread
4193 exit events, and so we have no idea whatsoever if the
4194 previously stepping thread is still alive. For that
4195 reason, we need to synchronously query the target
4196 now. */
4197 if (is_exited (tp->ptid)
4198 || !target_thread_alive (tp->ptid))
4199 {
4200 if (debug_infrun)
4201 fprintf_unfiltered (gdb_stdlog, "\
4202 infrun: not switching back to stepped thread, it has vanished\n");
4203
4204 delete_thread (tp->ptid);
4205 keep_going (ecs);
4206 return;
4207 }
4208
4209 /* Otherwise, we no longer expect a trap in the current thread.
4210 Clear the trap_expected flag before switching back -- this is
4211 what keep_going would do as well, if we called it. */
4212 ecs->event_thread->trap_expected = 0;
4213
4214 if (debug_infrun)
4215 fprintf_unfiltered (gdb_stdlog,
4216 "infrun: switching back to stepped thread\n");
4217
4218 ecs->event_thread = tp;
4219 ecs->ptid = tp->ptid;
4220 context_switch (ecs->ptid);
4221 keep_going (ecs);
4222 return;
4223 }
4224 }
4225
4226 /* Are we stepping to get the inferior out of the dynamic linker's
4227 hook (and possibly the dld itself) after catching a shlib
4228 event? */
4229 if (ecs->event_thread->stepping_through_solib_after_catch)
4230 {
4231 #if defined(SOLIB_ADD)
4232 /* Have we reached our destination? If not, keep going. */
4233 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4234 {
4235 if (debug_infrun)
4236 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4237 ecs->event_thread->stepping_over_breakpoint = 1;
4238 keep_going (ecs);
4239 return;
4240 }
4241 #endif
4242 if (debug_infrun)
4243 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4244 /* Else, stop and report the catchpoint(s) whose triggering
4245 caused us to begin stepping. */
4246 ecs->event_thread->stepping_through_solib_after_catch = 0;
4247 bpstat_clear (&ecs->event_thread->stop_bpstat);
4248 ecs->event_thread->stop_bpstat
4249 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4250 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4251 stop_print_frame = 1;
4252 stop_stepping (ecs);
4253 return;
4254 }
4255
4256 if (ecs->event_thread->step_resume_breakpoint)
4257 {
4258 if (debug_infrun)
4259 fprintf_unfiltered (gdb_stdlog,
4260 "infrun: step-resume breakpoint is inserted\n");
4261
4262 /* Having a step-resume breakpoint overrides anything
4263 else having to do with stepping commands until
4264 that breakpoint is reached. */
4265 keep_going (ecs);
4266 return;
4267 }
4268
4269 if (ecs->event_thread->step_range_end == 0)
4270 {
4271 if (debug_infrun)
4272 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4273 /* Likewise if we aren't even stepping. */
4274 keep_going (ecs);
4275 return;
4276 }
4277
4278 /* Re-fetch current thread's frame in case the code above caused
4279 the frame cache to be re-initialized, making our FRAME variable
4280 a dangling pointer. */
4281 frame = get_current_frame ();
4282
4283 /* If stepping through a line, keep going if still within it.
4284
4285 Note that step_range_end is the address of the first instruction
4286 beyond the step range, and NOT the address of the last instruction
4287 within it!
4288
4289 Note also that during reverse execution, we may be stepping
4290 through a function epilogue and therefore must detect when
4291 the current-frame changes in the middle of a line. */
4292
4293 if (stop_pc >= ecs->event_thread->step_range_start
4294 && stop_pc < ecs->event_thread->step_range_end
4295 && (execution_direction != EXEC_REVERSE
4296 || frame_id_eq (get_frame_id (frame),
4297 ecs->event_thread->step_frame_id)))
4298 {
4299 if (debug_infrun)
4300 fprintf_unfiltered
4301 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4302 paddress (gdbarch, ecs->event_thread->step_range_start),
4303 paddress (gdbarch, ecs->event_thread->step_range_end));
4304
4305 /* When stepping backward, stop at beginning of line range
4306 (unless it's the function entry point, in which case
4307 keep going back to the call point). */
4308 if (stop_pc == ecs->event_thread->step_range_start
4309 && stop_pc != ecs->stop_func_start
4310 && execution_direction == EXEC_REVERSE)
4311 {
4312 ecs->event_thread->stop_step = 1;
4313 print_stop_reason (END_STEPPING_RANGE, 0);
4314 stop_stepping (ecs);
4315 }
4316 else
4317 keep_going (ecs);
4318
4319 return;
4320 }
4321
4322 /* We stepped out of the stepping range. */
4323
4324 /* If we are stepping at the source level and entered the runtime
4325 loader dynamic symbol resolution code...
4326
4327 EXEC_FORWARD: we keep on single stepping until we exit the run
4328 time loader code and reach the callee's address.
4329
4330 EXEC_REVERSE: we've already executed the callee (backward), and
4331 the runtime loader code is handled just like any other
4332 undebuggable function call. Now we need only keep stepping
4333 backward through the trampoline code, and that's handled further
4334 down, so there is nothing for us to do here. */
4335
4336 if (execution_direction != EXEC_REVERSE
4337 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4338 && in_solib_dynsym_resolve_code (stop_pc))
4339 {
4340 CORE_ADDR pc_after_resolver =
4341 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4342
4343 if (debug_infrun)
4344 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4345
4346 if (pc_after_resolver)
4347 {
4348 /* Set up a step-resume breakpoint at the address
4349 indicated by SKIP_SOLIB_RESOLVER. */
4350 struct symtab_and_line sr_sal;
4351
4352 init_sal (&sr_sal);
4353 sr_sal.pc = pc_after_resolver;
4354 sr_sal.pspace = get_frame_program_space (frame);
4355
4356 insert_step_resume_breakpoint_at_sal (gdbarch,
4357 sr_sal, null_frame_id);
4358 }
4359
4360 keep_going (ecs);
4361 return;
4362 }
4363
4364 if (ecs->event_thread->step_range_end != 1
4365 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4366 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4367 && get_frame_type (frame) == SIGTRAMP_FRAME)
4368 {
4369 if (debug_infrun)
4370 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4371 /* The inferior, while doing a "step" or "next", has ended up in
4372 a signal trampoline (either by a signal being delivered or by
4373 the signal handler returning). Just single-step until the
4374 inferior leaves the trampoline (either by calling the handler
4375 or returning). */
4376 keep_going (ecs);
4377 return;
4378 }
4379
4380 /* Check for subroutine calls. The check for the current frame
4381 equalling the step ID is not necessary - the check of the
4382 previous frame's ID is sufficient - but it is a common case and
4383 cheaper than checking the previous frame's ID.
4384
4385 NOTE: frame_id_eq will never report two invalid frame IDs as
4386 being equal, so to get into this block, both the current and
4387 previous frame must have valid frame IDs. */
4388 /* The outer_frame_id check is a heuristic to detect stepping
4389 through startup code. If we step over an instruction which
4390 sets the stack pointer from an invalid value to a valid value,
4391 we may detect that as a subroutine call from the mythical
4392 "outermost" function. This could be fixed by marking
4393 outermost frames as !stack_p,code_p,special_p. Then the
4394 initial outermost frame, before sp was valid, would
4395 have code_addr == &_start. See the comment in frame_id_eq
4396 for more. */
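  /* In other words: the frame we stopped in is not the frame we were
     stepping in, but its caller is, and (per the outer_frame_id
     heuristic above) this is not startup code misdetected as a call.  */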
4397 if (!frame_id_eq (get_stack_frame_id (frame),
4398 ecs->event_thread->step_stack_frame_id)
4399 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4400 ecs->event_thread->step_stack_frame_id)
4401 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4402 outer_frame_id)
4403 || step_start_function != find_pc_function (stop_pc))))
4404 {
4405 CORE_ADDR real_stop_pc;
4406
4407 if (debug_infrun)
4408 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4409
4410 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4411 || ((ecs->event_thread->step_range_end == 1)
4412 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4413 ecs->stop_func_start)))
4414 {
4415 /* I presume that step_over_calls is only 0 when we're
4416 supposed to be stepping at the assembly language level
4417 ("stepi"). Just stop. */
4418 /* Also, maybe we just did a "nexti" inside a prolog, so we
4419 thought it was a subroutine call but it was not. Stop as
4420 well. FENN */
4421 /* And this works the same backward as frontward. MVS */
4422 ecs->event_thread->stop_step = 1;
4423 print_stop_reason (END_STEPPING_RANGE, 0);
4424 stop_stepping (ecs);
4425 return;
4426 }
4427
4428 /* Reverse stepping through solib trampolines. */
4429
4430 if (execution_direction == EXEC_REVERSE
4431 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4432 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4433 || (ecs->stop_func_start == 0
4434 && in_solib_dynsym_resolve_code (stop_pc))))
4435 {
4436 /* Any solib trampoline code can be handled in reverse
4437 by simply continuing to single-step. We have already
4438 executed the solib function (backwards), and a few
4439 steps will take us back through the trampoline to the
4440 caller. */
4441 keep_going (ecs);
4442 return;
4443 }
4444
4445 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4446 {
4447 /* We're doing a "next".
4448
4449 Normal (forward) execution: set a breakpoint at the
4450 callee's return address (the address at which the caller
4451 will resume).
4452
4453 Reverse (backward) execution. set the step-resume
4454 breakpoint at the start of the function that we just
4455 stepped into (backwards), and continue to there. When we
4456 get there, we'll need to single-step back to the caller. */
4457
4458 if (execution_direction == EXEC_REVERSE)
4459 {
4460 struct symtab_and_line sr_sal;
4461
4462 /* Normal function call return (static or dynamic). */
4463 init_sal (&sr_sal);
4464 sr_sal.pc = ecs->stop_func_start;
4465 sr_sal.pspace = get_frame_program_space (frame);
4466 insert_step_resume_breakpoint_at_sal (gdbarch,
4467 sr_sal, null_frame_id);
4468 }
4469 else
4470 insert_step_resume_breakpoint_at_caller (frame);
4471
4472 keep_going (ecs);
4473 return;
4474 }
4475
4476 /* If we are in a function call trampoline (a stub between the
4477 calling routine and the real function), locate the real
4478 function. That's what tells us (a) whether we want to step
4479 into it at all, and (b) what prologue we want to run to the
4480 end of, if we do step into it. */
4481 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4482 if (real_stop_pc == 0)
4483 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4484 if (real_stop_pc != 0)
4485 ecs->stop_func_start = real_stop_pc;
4486
4487 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4488 {
4489 struct symtab_and_line sr_sal;
4490
4491 init_sal (&sr_sal);
4492 sr_sal.pc = ecs->stop_func_start;
4493 sr_sal.pspace = get_frame_program_space (frame);
4494
4495 insert_step_resume_breakpoint_at_sal (gdbarch,
4496 sr_sal, null_frame_id);
4497 keep_going (ecs);
4498 return;
4499 }
4500
4501 /* If we have line number information for the function we are
4502 thinking of stepping into, step into it.
4503
4504 If there are several symtabs at that PC (e.g. with include
4505 files), just want to know whether *any* of them have line
4506 numbers. find_pc_line handles this. */
4507 {
4508 struct symtab_and_line tmp_sal;
4509
4510 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4511 tmp_sal.pspace = get_frame_program_space (frame);
4512 if (tmp_sal.line != 0)
4513 {
4514 if (execution_direction == EXEC_REVERSE)
4515 handle_step_into_function_backward (gdbarch, ecs);
4516 else
4517 handle_step_into_function (gdbarch, ecs);
4518 return;
4519 }
4520 }
4521
4522 /* If we have no line number and the step-stop-if-no-debug is
4523 set, we stop the step so that the user has a chance to switch
4524 	 to assembly mode.  */
4525 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4526 && step_stop_if_no_debug)
4527 {
4528 ecs->event_thread->stop_step = 1;
4529 print_stop_reason (END_STEPPING_RANGE, 0);
4530 stop_stepping (ecs);
4531 return;
4532 }
4533
4534 if (execution_direction == EXEC_REVERSE)
4535 {
4536 /* Set a breakpoint at callee's start address.
4537 From there we can step once and be back in the caller. */
4538 struct symtab_and_line sr_sal;
4539
4540 init_sal (&sr_sal);
4541 sr_sal.pc = ecs->stop_func_start;
4542 sr_sal.pspace = get_frame_program_space (frame);
4543 insert_step_resume_breakpoint_at_sal (gdbarch,
4544 sr_sal, null_frame_id);
4545 }
4546 else
4547 /* Set a breakpoint at callee's return address (the address
4548 at which the caller will resume). */
4549 insert_step_resume_breakpoint_at_caller (frame);
4550
4551 keep_going (ecs);
4552 return;
4553 }
4554
4555 /* Reverse stepping through solib trampolines. */
4556
4557 if (execution_direction == EXEC_REVERSE
4558 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4559 {
4560 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4561 || (ecs->stop_func_start == 0
4562 && in_solib_dynsym_resolve_code (stop_pc)))
4563 {
4564 /* Any solib trampoline code can be handled in reverse
4565 by simply continuing to single-step. We have already
4566 executed the solib function (backwards), and a few
4567 steps will take us back through the trampoline to the
4568 caller. */
4569 keep_going (ecs);
4570 return;
4571 }
4572 else if (in_solib_dynsym_resolve_code (stop_pc))
4573 {
4574 /* Stepped backward into the solib dynsym resolver.
4575 Set a breakpoint at its start and continue, then
4576 one more step will take us out. */
4577 struct symtab_and_line sr_sal;
4578
4579 init_sal (&sr_sal);
4580 sr_sal.pc = ecs->stop_func_start;
4581 sr_sal.pspace = get_frame_program_space (frame);
4582 insert_step_resume_breakpoint_at_sal (gdbarch,
4583 sr_sal, null_frame_id);
4584 keep_going (ecs);
4585 return;
4586 }
4587 }
4588
4589 /* If we're in the return path from a shared library trampoline,
4590 we want to proceed through the trampoline when stepping. */
4591 if (gdbarch_in_solib_return_trampoline (gdbarch,
4592 stop_pc, ecs->stop_func_name))
4593 {
4594 /* Determine where this trampoline returns. */
4595 CORE_ADDR real_stop_pc;
4596
4597 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4598
4599 if (debug_infrun)
4600 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4601
4602 /* Only proceed through if we know where it's going. */
4603 if (real_stop_pc)
4604 {
4605 /* And put the step-breakpoint there and go until there. */
4606 struct symtab_and_line sr_sal;
4607
4608 init_sal (&sr_sal); /* initialize to zeroes */
4609 sr_sal.pc = real_stop_pc;
4610 sr_sal.section = find_pc_overlay (sr_sal.pc);
4611 sr_sal.pspace = get_frame_program_space (frame);
4612
4613 /* Do not specify what the fp should be when we stop since
4614 on some machines the prologue is where the new fp value
4615 is established. */
4616 insert_step_resume_breakpoint_at_sal (gdbarch,
4617 sr_sal, null_frame_id);
4618
4619 /* Restart without fiddling with the step ranges or
4620 other state. */
4621 keep_going (ecs);
4622 return;
4623 }
4624 }
4625
4626 stop_pc_sal = find_pc_line (stop_pc, 0);
4627
4628 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4629      the trampoline processing logic; however, there are some trampolines
4630 that have no names, so we should do trampoline handling first. */
4631 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4632 && ecs->stop_func_name == NULL
4633 && stop_pc_sal.line == 0)
4634 {
4635 if (debug_infrun)
4636 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4637
4638 /* The inferior just stepped into, or returned to, an
4639 undebuggable function (where there is no debugging information
4640 and no line number corresponding to the address where the
4641 inferior stopped). Since we want to skip this kind of code,
4642 we keep going until the inferior returns from this
4643 function - unless the user has asked us not to (via
4644 set step-mode) or we no longer know how to get back
4645 to the call site. */
4646 if (step_stop_if_no_debug
4647 || !frame_id_p (frame_unwind_caller_id (frame)))
4648 {
4649 /* If we have no line number and the step-stop-if-no-debug
4650 is set, we stop the step so that the user has a chance to
4651 	     switch to assembly mode.  */
4652 ecs->event_thread->stop_step = 1;
4653 print_stop_reason (END_STEPPING_RANGE, 0);
4654 stop_stepping (ecs);
4655 return;
4656 }
4657 else
4658 {
4659 /* Set a breakpoint at callee's return address (the address
4660 at which the caller will resume). */
4661 insert_step_resume_breakpoint_at_caller (frame);
4662 keep_going (ecs);
4663 return;
4664 }
4665 }
4666
4667 if (ecs->event_thread->step_range_end == 1)
4668 {
4669 /* It is stepi or nexti. We always want to stop stepping after
4670 one instruction. */
4671 if (debug_infrun)
4672 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4673 ecs->event_thread->stop_step = 1;
4674 print_stop_reason (END_STEPPING_RANGE, 0);
4675 stop_stepping (ecs);
4676 return;
4677 }
4678
4679 if (stop_pc_sal.line == 0)
4680 {
4681 /* We have no line number information. That means to stop
4682 stepping (does this always happen right after one instruction,
4683 when we do "s" in a function with no line numbers,
4684 or can this happen as a result of a return or longjmp?). */
4685 if (debug_infrun)
4686 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4687 ecs->event_thread->stop_step = 1;
4688 print_stop_reason (END_STEPPING_RANGE, 0);
4689 stop_stepping (ecs);
4690 return;
4691 }
4692
4693 /* Look for "calls" to inlined functions, part one. If the inline
4694 frame machinery detected some skipped call sites, we have entered
4695 a new inline function. */
4696
4697 if (frame_id_eq (get_frame_id (get_current_frame ()),
4698 ecs->event_thread->step_frame_id)
4699 && inline_skipped_frames (ecs->ptid))
4700 {
4701 struct symtab_and_line call_sal;
4702
4703 if (debug_infrun)
4704 fprintf_unfiltered (gdb_stdlog,
4705 "infrun: stepped into inlined function\n");
4706
4707 find_frame_sal (get_current_frame (), &call_sal);
4708
4709 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4710 {
4711 /* For "step", we're going to stop. But if the call site
4712 for this inlined function is on the same source line as
4713 we were previously stepping, go down into the function
4714 first. Otherwise stop at the call site. */
4715
4716 if (call_sal.line == ecs->event_thread->current_line
4717 && call_sal.symtab == ecs->event_thread->current_symtab)
4718 step_into_inline_frame (ecs->ptid);
4719
4720 ecs->event_thread->stop_step = 1;
4721 print_stop_reason (END_STEPPING_RANGE, 0);
4722 stop_stepping (ecs);
4723 return;
4724 }
4725 else
4726 {
4727 /* For "next", we should stop at the call site if it is on a
4728 different source line. Otherwise continue through the
4729 inlined function. */
4730 if (call_sal.line == ecs->event_thread->current_line
4731 && call_sal.symtab == ecs->event_thread->current_symtab)
4732 keep_going (ecs);
4733 else
4734 {
4735 ecs->event_thread->stop_step = 1;
4736 print_stop_reason (END_STEPPING_RANGE, 0);
4737 stop_stepping (ecs);
4738 }
4739 return;
4740 }
4741 }
4742
4743 /* Look for "calls" to inlined functions, part two. If we are still
4744 in the same real function we were stepping through, but we have
4745 to go further up to find the exact frame ID, we are stepping
4746 through a more inlined call beyond its call site. */
4747
4748 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4749 && !frame_id_eq (get_frame_id (get_current_frame ()),
4750 ecs->event_thread->step_frame_id)
4751 && stepped_in_from (get_current_frame (),
4752 ecs->event_thread->step_frame_id))
4753 {
4754 if (debug_infrun)
4755 fprintf_unfiltered (gdb_stdlog,
4756 "infrun: stepping through inlined function\n");
4757
4758 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4759 keep_going (ecs);
4760 else
4761 {
4762 ecs->event_thread->stop_step = 1;
4763 print_stop_reason (END_STEPPING_RANGE, 0);
4764 stop_stepping (ecs);
4765 }
4766 return;
4767 }
4768
4769 if ((stop_pc == stop_pc_sal.pc)
4770 && (ecs->event_thread->current_line != stop_pc_sal.line
4771 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4772 {
4773 /* We are at the start of a different line. So stop. Note that
4774 we don't stop if we step into the middle of a different line.
4775 That is said to make things like for (;;) statements work
4776 better. */
4777 if (debug_infrun)
4778 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4779 ecs->event_thread->stop_step = 1;
4780 print_stop_reason (END_STEPPING_RANGE, 0);
4781 stop_stepping (ecs);
4782 return;
4783 }
4784
4785 /* We aren't done stepping.
4786
4787 Optimize by setting the stepping range to the line.
4788 (We might not be in the original line, but if we entered a
4789 new line in mid-statement, we continue stepping. This makes
4790 things like for(;;) statements work better.) */
4791
4792 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4793 ecs->event_thread->step_range_end = stop_pc_sal.end;
4794 set_step_info (frame, stop_pc_sal);
4795
4796 if (debug_infrun)
4797 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4798 keep_going (ecs);
4799 }
4800
4801 /* Is thread TP in the middle of single-stepping? */
4802
4803 static int
4804 currently_stepping (struct thread_info *tp)
4805 {
4806 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4807 || tp->trap_expected
4808 || tp->stepping_through_solib_after_catch
4809 || bpstat_should_step ());
4810 }
4811
4812 /* Returns true if any thread *but* the one passed in "data" is in the
4813 middle of stepping or of handling a "next". */
4814
4815 static int
4816 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4817 {
4818 if (tp == data)
4819 return 0;
4820
4821 return (tp->step_range_end
4822 || tp->trap_expected
4823 || tp->stepping_through_solib_after_catch);
4824 }
4825
4826 /* Inferior has stepped into a subroutine call with source code that
4827    we should not step over.  Step to the first line of code in
4828 it. */
4829
4830 static void
4831 handle_step_into_function (struct gdbarch *gdbarch,
4832 struct execution_control_state *ecs)
4833 {
4834 struct symtab *s;
4835 struct symtab_and_line stop_func_sal, sr_sal;
4836
4837 s = find_pc_symtab (stop_pc);
4838 if (s && s->language != language_asm)
4839 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4840 ecs->stop_func_start);
4841
4842 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4843 /* Use the step_resume_break to step until the end of the prologue,
4844 even if that involves jumps (as it seems to on the vax under
4845 4.2). */
4846 /* If the prologue ends in the middle of a source line, continue to
4847 the end of that source line (if it is still within the function).
4848 Otherwise, just go to end of prologue. */
4849 if (stop_func_sal.end
4850 && stop_func_sal.pc != ecs->stop_func_start
4851 && stop_func_sal.end < ecs->stop_func_end)
4852 ecs->stop_func_start = stop_func_sal.end;
4853
4854 /* Architectures which require breakpoint adjustment might not be able
4855 to place a breakpoint at the computed address. If so, the test
4856 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4857 ecs->stop_func_start to an address at which a breakpoint may be
4858 legitimately placed.
4859
4860 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4861 made, GDB will enter an infinite loop when stepping through
4862 optimized code consisting of VLIW instructions which contain
4863 subinstructions corresponding to different source lines. On
4864 FR-V, it's not permitted to place a breakpoint on any but the
4865 first subinstruction of a VLIW instruction. When a breakpoint is
4866 set, GDB will adjust the breakpoint address to the beginning of
4867 the VLIW instruction. Thus, we need to make the corresponding
4868 adjustment here when computing the stop address. */
4869
4870 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4871 {
4872 ecs->stop_func_start
4873 = gdbarch_adjust_breakpoint_address (gdbarch,
4874 ecs->stop_func_start);
4875 }
4876
4877 if (ecs->stop_func_start == stop_pc)
4878 {
4879 /* We are already there: stop now. */
4880 ecs->event_thread->stop_step = 1;
4881 print_stop_reason (END_STEPPING_RANGE, 0);
4882 stop_stepping (ecs);
4883 return;
4884 }
4885 else
4886 {
4887 /* Put the step-breakpoint there and go until there. */
4888 init_sal (&sr_sal); /* initialize to zeroes */
4889 sr_sal.pc = ecs->stop_func_start;
4890 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4891 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4892
4893 /* Do not specify what the fp should be when we stop since on
4894 some machines the prologue is where the new fp value is
4895 established. */
4896 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4897
4898 /* And make sure stepping stops right away then. */
4899 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4900 }
4901 keep_going (ecs);
4902 }
4903
4904 /* Inferior has stepped backward into a subroutine call with source
4905 code that we should not step over. Do step to the beginning of the
4906 last line of code in it. */
4907
4908 static void
4909 handle_step_into_function_backward (struct gdbarch *gdbarch,
4910 struct execution_control_state *ecs)
4911 {
4912 struct symtab *s;
4913 struct symtab_and_line stop_func_sal;
4914
4915 s = find_pc_symtab (stop_pc);
4916 if (s && s->language != language_asm)
4917 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4918 ecs->stop_func_start);
4919
4920 stop_func_sal = find_pc_line (stop_pc, 0);
4921
4922 /* OK, we're just going to keep stepping here. */
4923 if (stop_func_sal.pc == stop_pc)
4924 {
4925 /* We're there already. Just stop stepping now. */
4926 ecs->event_thread->stop_step = 1;
4927 print_stop_reason (END_STEPPING_RANGE, 0);
4928 stop_stepping (ecs);
4929 }
4930 else
4931 {
4932 /* Else just reset the step range and keep going.
4933 No step-resume breakpoint, they don't work for
4934 epilogues, which can have multiple entry paths. */
4935 ecs->event_thread->step_range_start = stop_func_sal.pc;
4936 ecs->event_thread->step_range_end = stop_func_sal.end;
4937 keep_going (ecs);
4938 }
4939 return;
4940 }
4941
4942 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4943 This is used both to step over functions and to skip over other code. */
4944
4945 static void
4946 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4947 struct symtab_and_line sr_sal,
4948 struct frame_id sr_id)
4949 {
4950 /* There should never be more than one step-resume or longjmp-resume
4951 breakpoint per thread, so we should never be setting a new
4952 step_resume_breakpoint when one is already active. */
4953 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4954
4955 if (debug_infrun)
4956 fprintf_unfiltered (gdb_stdlog,
4957 "infrun: inserting step-resume breakpoint at %s\n",
4958 paddress (gdbarch, sr_sal.pc));
4959
4960 inferior_thread ()->step_resume_breakpoint
4961 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4962 }
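
/* Usage sketch (illustrative only; RESUME_PC is a placeholder for the
   address the caller wants to run to): a caller builds a symtab_and_line
   for the resume address and picks the frame ID the breakpoint belongs
   to, as handle_step_into_function does above:

     struct symtab_and_line sr_sal;

     init_sal (&sr_sal);
     sr_sal.pc = resume_pc;
     sr_sal.section = find_pc_overlay (resume_pc);
     sr_sal.pspace = get_frame_program_space (get_current_frame ());
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

   Passing null_frame_id means the breakpoint is not tied to a particular
   frame.  */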
4963
4964 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4965 to skip a potential signal handler.
4966
4967 This is called with the interrupted function's frame. The signal
4968 handler, when it returns, will resume the interrupted function at
4969 RETURN_FRAME.pc. */
4970
4971 static void
4972 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4973 {
4974 struct symtab_and_line sr_sal;
4975 struct gdbarch *gdbarch;
4976
4977 gdb_assert (return_frame != NULL);
4978 init_sal (&sr_sal); /* initialize to zeros */
4979
4980 gdbarch = get_frame_arch (return_frame);
4981 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4982 sr_sal.section = find_pc_overlay (sr_sal.pc);
4983 sr_sal.pspace = get_frame_program_space (return_frame);
4984
4985 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4986 get_stack_frame_id (return_frame));
4987 }
4988
4989 /* Similar to insert_step_resume_breakpoint_at_frame, except
4990 insert a breakpoint at the previous frame's PC. This is used to
4991 skip a function after stepping into it (for "next" or if the called
4992 function has no debugging information).
4993
4994 The current function has almost always been reached by single
4995 stepping a call or return instruction. NEXT_FRAME belongs to the
4996 current function, and the breakpoint will be set at the caller's
4997 resume address.
4998
4999 This is a separate function rather than reusing
5000 insert_step_resume_breakpoint_at_frame in order to avoid
5001 get_prev_frame, which may stop prematurely (see the implementation
5002 of frame_unwind_caller_id for an example). */
5003
5004 static void
5005 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5006 {
5007 struct symtab_and_line sr_sal;
5008 struct gdbarch *gdbarch;
5009
5010 /* We shouldn't have gotten here if we don't know where the call site
5011 is. */
5012 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5013
5014 init_sal (&sr_sal); /* initialize to zeros */
5015
5016 gdbarch = frame_unwind_caller_arch (next_frame);
5017 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5018 frame_unwind_caller_pc (next_frame));
5019 sr_sal.section = find_pc_overlay (sr_sal.pc);
5020 sr_sal.pspace = frame_unwind_program_space (next_frame);
5021
5022 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5023 frame_unwind_caller_id (next_frame));
5024 }
5025
5026 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5027 new breakpoint at the target of a jmp_buf. The handling of
5028 longjmp-resume uses the same mechanisms used for handling
5029 "step-resume" breakpoints. */
5030
5031 static void
5032 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5033 {
5034 /* There should never be more than one step-resume or longjmp-resume
5035 breakpoint per thread, so we should never be setting a new
5036 longjmp_resume_breakpoint when one is already active. */
5037 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5038
5039 if (debug_infrun)
5040 fprintf_unfiltered (gdb_stdlog,
5041 "infrun: inserting longjmp-resume breakpoint at %s\n",
5042 paddress (gdbarch, pc));
5043
5044 inferior_thread ()->step_resume_breakpoint =
5045 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5046 }
5047
5048 static void
5049 stop_stepping (struct execution_control_state *ecs)
5050 {
5051 if (debug_infrun)
5052 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5053
5054 /* Let callers know we don't want to wait for the inferior anymore. */
5055 ecs->wait_some_more = 0;
5056 }
5057
5058 /* This function handles various cases where we need to continue
5059 waiting for the inferior. */
5060 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5061
5062 static void
5063 keep_going (struct execution_control_state *ecs)
5064 {
5065 /* Make sure normal_stop is called if we get a QUIT handled before
5066 reaching resume. */
5067 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5068
5069 /* Save the pc before execution, to compare with pc after stop. */
5070 ecs->event_thread->prev_pc
5071 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5072
5073 /* If we have not broken out by this point, it means we should keep
5074 running the inferior and not return to the debugger. */
5075
5076 if (ecs->event_thread->trap_expected
5077 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5078 {
5079 /* We took a signal (which we are supposed to pass through to
5080 the inferior, else we'd not get here) and we haven't yet
5081 gotten our trap. Simply continue. */
5082
5083 discard_cleanups (old_cleanups);
5084 resume (currently_stepping (ecs->event_thread),
5085 ecs->event_thread->stop_signal);
5086 }
5087 else
5088 {
5089 /* Either the trap was not expected, but we are continuing
5090 anyway (the user asked that this signal be passed to the
5091 child)
5092 -- or --
5093 The signal was SIGTRAP, e.g. it was our signal, but we
5094 decided we should resume from it.
5095
5096 We're going to run this baby now!
5097
5098 Note that insert_breakpoints won't try to re-insert
5099 already inserted breakpoints. Therefore, we don't
5100 care if breakpoints were already inserted, or not. */
5101
5102 if (ecs->event_thread->stepping_over_breakpoint)
5103 {
5104 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5105
5106 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5107 /* Since we can't do a displaced step, we have to remove
5108 the breakpoint while we step it. To keep things
5109 simple, we remove them all. */
5110 remove_breakpoints ();
5111 }
5112 else
5113 {
5114 struct gdb_exception e;
5115
5116 /* Stop stepping when inserting breakpoints
5117 has failed. */
5118 TRY_CATCH (e, RETURN_MASK_ERROR)
5119 {
5120 insert_breakpoints ();
5121 }
5122 if (e.reason < 0)
5123 {
5124 exception_print (gdb_stderr, e);
5125 stop_stepping (ecs);
5126 return;
5127 }
5128 }
5129
5130 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5131
5132 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5133 specifies that such a signal should be delivered to the
5134 target program).
5135
5136 Typically, this would occur when a user is debugging a
5137 target monitor on a simulator: the target monitor sets a
5138 breakpoint; the simulator encounters this breakpoint and
5139 halts the simulation handing control to GDB; GDB, noting
5140 that the breakpoint isn't valid, returns control back to the
5141 simulator; the simulator then delivers the hardware
5142 equivalent of a SIGNAL_TRAP to the program being debugged. */
5143
5144 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5145 && !signal_program[ecs->event_thread->stop_signal])
5146 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5147
5148 discard_cleanups (old_cleanups);
5149 resume (currently_stepping (ecs->event_thread),
5150 ecs->event_thread->stop_signal);
5151 }
5152
5153 prepare_to_wait (ecs);
5154 }
5155
5156 /* This function normally comes after a resume, before
5157 handle_inferior_event exits. It takes care of any last bits of
5158 housekeeping, and sets the all-important wait_some_more flag. */
5159
5160 static void
5161 prepare_to_wait (struct execution_control_state *ecs)
5162 {
5163 if (debug_infrun)
5164 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5165
5166 /* This is the old end of the while loop. Let everybody know we
5167 want to wait for the inferior some more and get called again
5168 soon. */
5169 ecs->wait_some_more = 1;
5170 }
5171
5172 /* Print why the inferior has stopped. We always print something when
5173 the inferior exits, or receives a signal. The rest of the cases are
5174 dealt with later on in normal_stop() and print_it_typical(). Ideally
5175 there should be a call to this function from handle_inferior_event()
5176 each time stop_stepping() is called. */
5177 static void
5178 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5179 {
5180 switch (stop_reason)
5181 {
5182 case END_STEPPING_RANGE:
5183 /* We are done with a step/next/si/ni command. */
5184 /* For now print nothing. */
5185 /* Print a message only if not in the middle of doing a "step n"
5186 operation for n > 1 */
5187 if (!inferior_thread ()->step_multi
5188 || !inferior_thread ()->stop_step)
5189 if (ui_out_is_mi_like_p (uiout))
5190 ui_out_field_string
5191 (uiout, "reason",
5192 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5193 break;
5194 case SIGNAL_EXITED:
5195 /* The inferior was terminated by a signal. */
5196 annotate_signalled ();
5197 if (ui_out_is_mi_like_p (uiout))
5198 ui_out_field_string
5199 (uiout, "reason",
5200 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5201 ui_out_text (uiout, "\nProgram terminated with signal ");
5202 annotate_signal_name ();
5203 ui_out_field_string (uiout, "signal-name",
5204 target_signal_to_name (stop_info));
5205 annotate_signal_name_end ();
5206 ui_out_text (uiout, ", ");
5207 annotate_signal_string ();
5208 ui_out_field_string (uiout, "signal-meaning",
5209 target_signal_to_string (stop_info));
5210 annotate_signal_string_end ();
5211 ui_out_text (uiout, ".\n");
5212 ui_out_text (uiout, "The program no longer exists.\n");
5213 break;
5214 case EXITED:
5215 /* The inferior program is finished. */
5216 annotate_exited (stop_info);
5217 if (stop_info)
5218 {
5219 if (ui_out_is_mi_like_p (uiout))
5220 ui_out_field_string (uiout, "reason",
5221 async_reason_lookup (EXEC_ASYNC_EXITED));
5222 ui_out_text (uiout, "\nProgram exited with code ");
5223 ui_out_field_fmt (uiout, "exit-code", "0%o",
5224 (unsigned int) stop_info);
5225 ui_out_text (uiout, ".\n");
5226 }
5227 else
5228 {
5229 if (ui_out_is_mi_like_p (uiout))
5230 ui_out_field_string
5231 (uiout, "reason",
5232 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5233 ui_out_text (uiout, "\nProgram exited normally.\n");
5234 }
5235 /* Support the --return-child-result option. */
5236 return_child_result_value = stop_info;
5237 break;
5238 case SIGNAL_RECEIVED:
5239 /* Signal received. The signal table tells us to print about
5240 it. */
5241 annotate_signal ();
5242
5243 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5244 {
5245 struct thread_info *t = inferior_thread ();
5246
5247 ui_out_text (uiout, "\n[");
5248 ui_out_field_string (uiout, "thread-name",
5249 target_pid_to_str (t->ptid));
5250 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5251 ui_out_text (uiout, " stopped");
5252 }
5253 else
5254 {
5255 ui_out_text (uiout, "\nProgram received signal ");
5256 annotate_signal_name ();
5257 if (ui_out_is_mi_like_p (uiout))
5258 ui_out_field_string
5259 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5260 ui_out_field_string (uiout, "signal-name",
5261 target_signal_to_name (stop_info));
5262 annotate_signal_name_end ();
5263 ui_out_text (uiout, ", ");
5264 annotate_signal_string ();
5265 ui_out_field_string (uiout, "signal-meaning",
5266 target_signal_to_string (stop_info));
5267 annotate_signal_string_end ();
5268 }
5269 ui_out_text (uiout, ".\n");
5270 break;
5271 case NO_HISTORY:
5272 /* Reverse execution: target ran out of history info. */
5273 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5274 break;
5275 default:
5276 internal_error (__FILE__, __LINE__,
5277 _("print_stop_reason: unrecognized enum value"));
5278 break;
5279 }
5280 }
5281 \f
5282
5283 /* Here to return control to GDB when the inferior stops for real.
5284 Print appropriate messages, remove breakpoints, give terminal our modes.
5285
5286 STOP_PRINT_FRAME nonzero means print the executing frame
5287 (pc, function, args, file, line number and line text).
5288 BREAKPOINTS_FAILED nonzero means stop was due to error
5289 attempting to insert breakpoints. */
5290
5291 void
5292 normal_stop (void)
5293 {
5294 struct target_waitstatus last;
5295 ptid_t last_ptid;
5296 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5297
5298 get_last_target_status (&last_ptid, &last);
5299
5300 /* If an exception is thrown from this point on, make sure to
5301 propagate GDB's knowledge of the executing state to the
5302 frontend/user running state. A QUIT is an easy exception to see
5303 here, so do this before any filtered output. */
5304 if (!non_stop)
5305 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5306 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5307 && last.kind != TARGET_WAITKIND_EXITED)
5308 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5309
5310 /* In non-stop mode, we don't want GDB to switch threads behind the
5311 user's back, to avoid races where the user is typing a command to
5312 apply to thread x, but GDB switches to thread y before the user
5313 finishes entering the command. */
5314
5315 /* As with the notification of thread events, we want to delay
5316 notifying the user that we've switched thread context until
5317 the inferior actually stops.
5318
5319 There's no point in saying anything if the inferior has exited.
5320 Note that SIGNALLED here means "exited with a signal", not
5321 "received a signal". */
5322 if (!non_stop
5323 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5324 && target_has_execution
5325 && last.kind != TARGET_WAITKIND_SIGNALLED
5326 && last.kind != TARGET_WAITKIND_EXITED)
5327 {
5328 target_terminal_ours_for_output ();
5329 printf_filtered (_("[Switching to %s]\n"),
5330 target_pid_to_str (inferior_ptid));
5331 annotate_thread_changed ();
5332 previous_inferior_ptid = inferior_ptid;
5333 }
5334
5335 if (!breakpoints_always_inserted_mode () && target_has_execution)
5336 {
5337 if (remove_breakpoints ())
5338 {
5339 target_terminal_ours_for_output ();
5340 printf_filtered (_("\
5341 Cannot remove breakpoints because program is no longer writable.\n\
5342 Further execution is probably impossible.\n"));
5343 }
5344 }
5345
5346 /* If an auto-display called a function and that got a signal,
5347 disable that auto-display to avoid an infinite recursion.
5348
5349 if (stopped_by_random_signal)
5350 disable_current_display ();
5351
5352 /* Don't print a message if in the middle of doing a "step n"
5353 operation for n > 1 */
5354 if (target_has_execution
5355 && last.kind != TARGET_WAITKIND_SIGNALLED
5356 && last.kind != TARGET_WAITKIND_EXITED
5357 && inferior_thread ()->step_multi
5358 && inferior_thread ()->stop_step)
5359 goto done;
5360
5361 target_terminal_ours ();
5362
5363 /* Set the current source location. This will also happen if we
5364 display the frame below, but the current SAL will be incorrect
5365 during a user hook-stop function. */
5366 if (has_stack_frames () && !stop_stack_dummy)
5367 set_current_sal_from_frame (get_current_frame (), 1);
5368
5369 /* Let the user/frontend see the threads as stopped. */
5370 do_cleanups (old_chain);
5371
5372 /* Look up the hook_stop and run it (CLI internally handles problem
5373 of stop_command's pre-hook not existing). */
5374 if (stop_command)
5375 catch_errors (hook_stop_stub, stop_command,
5376 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5377
5378 if (!has_stack_frames ())
5379 goto done;
5380
5381 if (last.kind == TARGET_WAITKIND_SIGNALLED
5382 || last.kind == TARGET_WAITKIND_EXITED)
5383 goto done;
5384
5385 /* Select innermost stack frame - i.e., current frame is frame 0,
5386 and current location is based on that.
5387 Don't do this on return from a stack dummy routine,
5388 or if the program has exited. */
5389
5390 if (!stop_stack_dummy)
5391 {
5392 select_frame (get_current_frame ());
5393
5394 /* Print current location without a level number, if
5395 we have changed functions or hit a breakpoint.
5396 Print source line if we have one.
5397 bpstat_print() contains the logic deciding in detail
5398 what to print, based on the event(s) that just occurred. */
5399
5400 /* If --batch-silent is enabled then there's no need to print the current
5401 source location, and trying to do so risks causing an error message
5402 about missing source files. */
5403 if (stop_print_frame && !batch_silent)
5404 {
5405 int bpstat_ret;
5406 int source_flag;
5407 int do_frame_printing = 1;
5408 struct thread_info *tp = inferior_thread ();
5409
5410 bpstat_ret = bpstat_print (tp->stop_bpstat);
5411 switch (bpstat_ret)
5412 {
5413 case PRINT_UNKNOWN:
5414 /* If we had hit a shared library event breakpoint,
5415 bpstat_print would print out this message. If we hit
5416 an OS-level shared library event, do the same
5417 thing. */
5418 if (last.kind == TARGET_WAITKIND_LOADED)
5419 {
5420 printf_filtered (_("Stopped due to shared library event\n"));
5421 source_flag = SRC_LINE; /* something bogus */
5422 do_frame_printing = 0;
5423 break;
5424 }
5425
5426 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5427 (or should) carry around the function and does (or
5428 should) use that when doing a frame comparison. */
5429 if (tp->stop_step
5430 && frame_id_eq (tp->step_frame_id,
5431 get_frame_id (get_current_frame ()))
5432 && step_start_function == find_pc_function (stop_pc))
5433 source_flag = SRC_LINE; /* finished step, just print source line */
5434 else
5435 source_flag = SRC_AND_LOC; /* print location and source line */
5436 break;
5437 case PRINT_SRC_AND_LOC:
5438 source_flag = SRC_AND_LOC; /* print location and source line */
5439 break;
5440 case PRINT_SRC_ONLY:
5441 source_flag = SRC_LINE;
5442 break;
5443 case PRINT_NOTHING:
5444 source_flag = SRC_LINE; /* something bogus */
5445 do_frame_printing = 0;
5446 break;
5447 default:
5448 internal_error (__FILE__, __LINE__, _("Unknown value."));
5449 }
5450
5451 /* The behavior of this routine with respect to the source
5452 flag is:
5453 SRC_LINE: Print only source line
5454 LOCATION: Print only location
5455 SRC_AND_LOC: Print location and source line */
5456 if (do_frame_printing)
5457 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5458
5459 /* Display the auto-display expressions. */
5460 do_displays ();
5461 }
5462 }
5463
5464 /* Save the function value return registers, if we care.
5465 We might be about to restore their previous contents. */
5466 if (inferior_thread ()->proceed_to_finish)
5467 {
5468 /* This should not be necessary. */
5469 if (stop_registers)
5470 regcache_xfree (stop_registers);
5471
5472 /* NB: The copy goes through to the target picking up the value of
5473 all the registers. */
5474 stop_registers = regcache_dup (get_current_regcache ());
5475 }
5476
5477 if (stop_stack_dummy == STOP_STACK_DUMMY)
5478 {
5479 /* Pop the empty frame that contains the stack dummy.
5480 This also restores inferior state prior to the call
5481 (struct inferior_thread_state). */
5482 struct frame_info *frame = get_current_frame ();
5483
5484 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5485 frame_pop (frame);
5486 /* frame_pop() calls reinit_frame_cache as the last thing it does
5487 which means there's currently no selected frame. We don't need
5488 to re-establish a selected frame if the dummy call returns normally,
5489 that will be done by restore_inferior_status. However, we do have
5490 to handle the case where the dummy call is returning after being
5491 stopped (e.g. the dummy call previously hit a breakpoint). We
5492 can't know which case we have so just always re-establish a
5493 selected frame here. */
5494 select_frame (get_current_frame ());
5495 }
5496
5497 done:
5498 annotate_stopped ();
5499
5500 /* Suppress the stop observer if we're in the middle of:
5501
5502 - a step n (n > 1), as there are still more steps to be done.
5503
5504 - a "finish" command, as the observer will be called in
5505 finish_command_continuation, so it can include the inferior
5506 function's return value.
5507
5508 - calling an inferior function, as we pretend the inferior didn't
5509 run at all. The return value of the call is handled by the
5510 expression evaluator, through call_function_by_hand. */
5511
5512 if (!target_has_execution
5513 || last.kind == TARGET_WAITKIND_SIGNALLED
5514 || last.kind == TARGET_WAITKIND_EXITED
5515 || (!inferior_thread ()->step_multi
5516 && !(inferior_thread ()->stop_bpstat
5517 && inferior_thread ()->proceed_to_finish)
5518 && !inferior_thread ()->in_infcall))
5519 {
5520 if (!ptid_equal (inferior_ptid, null_ptid))
5521 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5522 stop_print_frame);
5523 else
5524 observer_notify_normal_stop (NULL, stop_print_frame);
5525 }
5526
5527 if (target_has_execution)
5528 {
5529 if (last.kind != TARGET_WAITKIND_SIGNALLED
5530 && last.kind != TARGET_WAITKIND_EXITED)
5531 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5532 Delete any breakpoint that is to be deleted at the next stop. */
5533 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5534 }
5535
5536 /* Try to get rid of automatically added inferiors that are no
5537 longer needed. Keeping those around slows down things linearly.
5538 Note that this never removes the current inferior. */
5539 prune_inferiors ();
5540 }
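
/* Illustrative sketch (assumes the normal_stop observer signature used
   by this GDB version; my_on_normal_stop is a hypothetical name):
   interpreters react to the stop by attaching an observer rather than
   hooking normal_stop directly:

     static void
     my_on_normal_stop (struct bpstats *bs, int print_frame)
     {
       ... update the frontend's notion of the stopped thread ...
     }

     observer_attach_normal_stop (my_on_normal_stop);

   MI's mi_on_normal_stop follows this pattern.  */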
5541
5542 static int
5543 hook_stop_stub (void *cmd)
5544 {
5545 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5546 return (0);
5547 }
5548 \f
5549 int
5550 signal_stop_state (int signo)
5551 {
5552 return signal_stop[signo];
5553 }
5554
5555 int
5556 signal_print_state (int signo)
5557 {
5558 return signal_print[signo];
5559 }
5560
5561 int
5562 signal_pass_state (int signo)
5563 {
5564 return signal_program[signo];
5565 }
5566
5567 int
5568 signal_stop_update (int signo, int state)
5569 {
5570 int ret = signal_stop[signo];
5571
5572 signal_stop[signo] = state;
5573 return ret;
5574 }
5575
5576 int
5577 signal_print_update (int signo, int state)
5578 {
5579 int ret = signal_print[signo];
5580
5581 signal_print[signo] = state;
5582 return ret;
5583 }
5584
5585 int
5586 signal_pass_update (int signo, int state)
5587 {
5588 int ret = signal_program[signo];
5589
5590 signal_program[signo] = state;
5591 return ret;
5592 }
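
/* Usage sketch (illustrative): the *_update functions above let other
   code flip a table entry temporarily and then put it back, e.g. to stop
   passing SIGINT to the program for the duration of some operation:

     int old_pass = signal_pass_update (TARGET_SIGNAL_INT, 0);
     ... do the work ...
     signal_pass_update (TARGET_SIGNAL_INT, old_pass);

   The *_state functions are the read-only counterparts.  */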
5593
5594 static void
5595 sig_print_header (void)
5596 {
5597 printf_filtered (_("\
5598 Signal Stop\tPrint\tPass to program\tDescription\n"));
5599 }
5600
5601 static void
5602 sig_print_info (enum target_signal oursig)
5603 {
5604 const char *name = target_signal_to_name (oursig);
5605 int name_padding = 13 - strlen (name);
5606
5607 if (name_padding <= 0)
5608 name_padding = 0;
5609
5610 printf_filtered ("%s", name);
5611 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5612 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5613 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5614 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5615 printf_filtered ("%s\n", target_signal_to_string (oursig));
5616 }
5617
5618 /* Specify how various signals in the inferior should be handled. */
5619
5620 static void
5621 handle_command (char *args, int from_tty)
5622 {
5623 char **argv;
5624 int digits, wordlen;
5625 int sigfirst, signum, siglast;
5626 enum target_signal oursig;
5627 int allsigs;
5628 int nsigs;
5629 unsigned char *sigs;
5630 struct cleanup *old_chain;
5631
5632 if (args == NULL)
5633 {
5634 error_no_arg (_("signal to handle"));
5635 }
5636
5637 /* Allocate and zero an array of flags for which signals to handle. */
5638
5639 nsigs = (int) TARGET_SIGNAL_LAST;
5640 sigs = (unsigned char *) alloca (nsigs);
5641 memset (sigs, 0, nsigs);
5642
5643 /* Break the command line up into args. */
5644
5645 argv = gdb_buildargv (args);
5646 old_chain = make_cleanup_freeargv (argv);
5647
5648 /* Walk through the args, looking for signal oursigs, signal names, and
5649 actions. Signal numbers and signal names may be interspersed with
5650 actions, with the actions being performed for all signals cumulatively
5651 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5652
5653 while (*argv != NULL)
5654 {
5655 wordlen = strlen (*argv);
5656 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5657 {;
5658 }
5659 allsigs = 0;
5660 sigfirst = siglast = -1;
5661
5662 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5663 {
5664 /* Apply action to all signals except those used by the
5665 debugger. Silently skip those. */
5666 allsigs = 1;
5667 sigfirst = 0;
5668 siglast = nsigs - 1;
5669 }
5670 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5671 {
5672 SET_SIGS (nsigs, sigs, signal_stop);
5673 SET_SIGS (nsigs, sigs, signal_print);
5674 }
5675 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5676 {
5677 UNSET_SIGS (nsigs, sigs, signal_program);
5678 }
5679 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5680 {
5681 SET_SIGS (nsigs, sigs, signal_print);
5682 }
5683 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5684 {
5685 SET_SIGS (nsigs, sigs, signal_program);
5686 }
5687 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5688 {
5689 UNSET_SIGS (nsigs, sigs, signal_stop);
5690 }
5691 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5692 {
5693 SET_SIGS (nsigs, sigs, signal_program);
5694 }
5695 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5696 {
5697 UNSET_SIGS (nsigs, sigs, signal_print);
5698 UNSET_SIGS (nsigs, sigs, signal_stop);
5699 }
5700 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5701 {
5702 UNSET_SIGS (nsigs, sigs, signal_program);
5703 }
5704 else if (digits > 0)
5705 {
5706 /* It is numeric. The numeric signal refers to our own
5707 internal signal numbering from target.h, not to host/target
5708 signal number. This is a feature; users really should be
5709 using symbolic names anyway, and the common ones like
5710 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5711
5712 sigfirst = siglast = (int)
5713 target_signal_from_command (atoi (*argv));
5714 if ((*argv)[digits] == '-')
5715 {
5716 siglast = (int)
5717 target_signal_from_command (atoi ((*argv) + digits + 1));
5718 }
5719 if (sigfirst > siglast)
5720 {
5721 /* Bet he didn't figure we'd think of this case... */
5722 signum = sigfirst;
5723 sigfirst = siglast;
5724 siglast = signum;
5725 }
5726 }
5727 else
5728 {
5729 oursig = target_signal_from_name (*argv);
5730 if (oursig != TARGET_SIGNAL_UNKNOWN)
5731 {
5732 sigfirst = siglast = (int) oursig;
5733 }
5734 else
5735 {
5736 /* Not a number and not a recognized flag word => complain. */
5737 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5738 }
5739 }
5740
5741 /* If any signal numbers or symbol names were found, set flags for
5742 which signals to apply actions to. */
5743
5744 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5745 {
5746 switch ((enum target_signal) signum)
5747 {
5748 case TARGET_SIGNAL_TRAP:
5749 case TARGET_SIGNAL_INT:
5750 if (!allsigs && !sigs[signum])
5751 {
5752 if (query (_("%s is used by the debugger.\n\
5753 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5754 {
5755 sigs[signum] = 1;
5756 }
5757 else
5758 {
5759 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5760 gdb_flush (gdb_stdout);
5761 }
5762 }
5763 break;
5764 case TARGET_SIGNAL_0:
5765 case TARGET_SIGNAL_DEFAULT:
5766 case TARGET_SIGNAL_UNKNOWN:
5767 /* Make sure that "all" doesn't print these. */
5768 break;
5769 default:
5770 sigs[signum] = 1;
5771 break;
5772 }
5773 }
5774
5775 argv++;
5776 }
5777
5778 for (signum = 0; signum < nsigs; signum++)
5779 if (sigs[signum])
5780 {
5781 target_notice_signals (inferior_ptid);
5782
5783 if (from_tty)
5784 {
5785 /* Show the results. */
5786 sig_print_header ();
5787 for (; signum < nsigs; signum++)
5788 if (sigs[signum])
5789 sig_print_info (signum);
5790 }
5791
5792 break;
5793 }
5794
5795 do_cleanups (old_chain);
5796 }
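
/* Examples of command lines the parser above accepts (illustrative):

     handle SIGUSR1 nostop noprint pass
     handle SIGALRM stop print
     handle 1-5 nopass
     handle all print

   Actions apply to every signal named so far on the line.  "all" expands
   to every signal except those used by the debugger (SIGTRAP, SIGINT),
   which are silently skipped; naming one of those explicitly prompts for
   confirmation instead.  */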
5797
5798 static void
5799 xdb_handle_command (char *args, int from_tty)
5800 {
5801 char **argv;
5802 struct cleanup *old_chain;
5803
5804 if (args == NULL)
5805 error_no_arg (_("xdb command"));
5806
5807 /* Break the command line up into args. */
5808
5809 argv = gdb_buildargv (args);
5810 old_chain = make_cleanup_freeargv (argv);
5811 if (argv[1] != (char *) NULL)
5812 {
5813 char *argBuf;
5814 int bufLen;
5815
5816 bufLen = strlen (argv[0]) + 20;
5817 argBuf = (char *) xmalloc (bufLen);
5818 if (argBuf)
5819 {
5820 int validFlag = 1;
5821 enum target_signal oursig;
5822
5823 oursig = target_signal_from_name (argv[0]);
5824 memset (argBuf, 0, bufLen);
5825 if (strcmp (argv[1], "Q") == 0)
5826 sprintf (argBuf, "%s %s", argv[0], "noprint");
5827 else
5828 {
5829 if (strcmp (argv[1], "s") == 0)
5830 {
5831 if (!signal_stop[oursig])
5832 sprintf (argBuf, "%s %s", argv[0], "stop");
5833 else
5834 sprintf (argBuf, "%s %s", argv[0], "nostop");
5835 }
5836 else if (strcmp (argv[1], "i") == 0)
5837 {
5838 if (!signal_program[oursig])
5839 sprintf (argBuf, "%s %s", argv[0], "pass");
5840 else
5841 sprintf (argBuf, "%s %s", argv[0], "nopass");
5842 }
5843 else if (strcmp (argv[1], "r") == 0)
5844 {
5845 if (!signal_print[oursig])
5846 sprintf (argBuf, "%s %s", argv[0], "print");
5847 else
5848 sprintf (argBuf, "%s %s", argv[0], "noprint");
5849 }
5850 else
5851 validFlag = 0;
5852 }
5853 if (validFlag)
5854 handle_command (argBuf, from_tty);
5855 else
5856 printf_filtered (_("Invalid signal handling flag.\n"));
5857 if (argBuf)
5858 xfree (argBuf);
5859 }
5860 }
5861 do_cleanups (old_chain);
5862 }
5863
5864 /* Print current contents of the tables set by the handle command.
5865 It is possible we should just be printing signals actually used
5866 by the current target (but for things to work right when switching
5867 targets, all signals should be in the signal tables). */
5868
5869 static void
5870 signals_info (char *signum_exp, int from_tty)
5871 {
5872 enum target_signal oursig;
5873
5874 sig_print_header ();
5875
5876 if (signum_exp)
5877 {
5878 /* First see if this is a symbol name. */
5879 oursig = target_signal_from_name (signum_exp);
5880 if (oursig == TARGET_SIGNAL_UNKNOWN)
5881 {
5882 /* No, try numeric. */
5883 oursig =
5884 target_signal_from_command (parse_and_eval_long (signum_exp));
5885 }
5886 sig_print_info (oursig);
5887 return;
5888 }
5889
5890 printf_filtered ("\n");
5891 /* These ugly casts brought to you by the native VAX compiler. */
5892 for (oursig = TARGET_SIGNAL_FIRST;
5893 (int) oursig < (int) TARGET_SIGNAL_LAST;
5894 oursig = (enum target_signal) ((int) oursig + 1))
5895 {
5896 QUIT;
5897
5898 if (oursig != TARGET_SIGNAL_UNKNOWN
5899 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5900 sig_print_info (oursig);
5901 }
5902
5903 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5904 }
5905
5906 /* The $_siginfo convenience variable is a bit special. We don't know
5907 for sure the type of the value until we actually have a chance to
5908 fetch the data. The type can change depending on gdbarch, so it is
5909 also dependent on which thread you have selected. This is handled by:
5910
5911 1. making $_siginfo be an internalvar that creates a new value on
5912 access.
5913
5914 2. making the value of $_siginfo be an lval_computed value. */
5915
5916 /* This function implements the lval_computed support for reading a
5917 $_siginfo value. */
5918
5919 static void
5920 siginfo_value_read (struct value *v)
5921 {
5922 LONGEST transferred;
5923
5924 transferred =
5925 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5926 NULL,
5927 value_contents_all_raw (v),
5928 value_offset (v),
5929 TYPE_LENGTH (value_type (v)));
5930
5931 if (transferred != TYPE_LENGTH (value_type (v)))
5932 error (_("Unable to read siginfo"));
5933 }
5934
5935 /* This function implements the lval_computed support for writing a
5936 $_siginfo value. */
5937
5938 static void
5939 siginfo_value_write (struct value *v, struct value *fromval)
5940 {
5941 LONGEST transferred;
5942
5943 transferred = target_write (&current_target,
5944 TARGET_OBJECT_SIGNAL_INFO,
5945 NULL,
5946 value_contents_all_raw (fromval),
5947 value_offset (v),
5948 TYPE_LENGTH (value_type (fromval)));
5949
5950 if (transferred != TYPE_LENGTH (value_type (fromval)))
5951 error (_("Unable to write siginfo"));
5952 }
5953
5954 static struct lval_funcs siginfo_value_funcs =
5955 {
5956 siginfo_value_read,
5957 siginfo_value_write
5958 };
5959
5960 /* Return a new value with the correct type for the siginfo object of
5961 the current thread using architecture GDBARCH. Return a void value
5962 if there's no object available. */
5963
5964 static struct value *
5965 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5966 {
5967 if (target_has_stack
5968 && !ptid_equal (inferior_ptid, null_ptid)
5969 && gdbarch_get_siginfo_type_p (gdbarch))
5970 {
5971 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5972
5973 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5974 }
5975
5976 return allocate_value (builtin_type (gdbarch)->builtin_void);
5977 }
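
/* Sketch of how the pieces above fit together (the registration itself
   is done elsewhere, presumably in _initialize_infrun; shown here only
   as an assumption, for illustration): $_siginfo is a "lazy" internal
   variable whose value is recomputed on every access, so it always
   reflects the current thread and gdbarch:

     create_internalvar_type_lazy ("_siginfo", siginfo_make_value);

   Reading or assigning the resulting lval_computed value then goes
   through siginfo_value_read / siginfo_value_write, which transfer the
   raw bytes via the TARGET_OBJECT_SIGNAL_INFO object.  */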
5978
5979 \f
5980 /* Inferior thread state.
5981 These are details related to the inferior itself, and don't include
5982 things like what frame the user had selected or what gdb was doing
5983 with the target at the time.
5984 For inferior function calls these are things we want to restore
5985 regardless of whether the function call successfully completes
5986 or the dummy frame has to be manually popped. */
5987
5988 struct inferior_thread_state
5989 {
5990 enum target_signal stop_signal;
5991 CORE_ADDR stop_pc;
5992 struct regcache *registers;
5993 };
5994
5995 struct inferior_thread_state *
5996 save_inferior_thread_state (void)
5997 {
5998 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5999 struct thread_info *tp = inferior_thread ();
6000
6001 inf_state->stop_signal = tp->stop_signal;
6002 inf_state->stop_pc = stop_pc;
6003
6004 inf_state->registers = regcache_dup (get_current_regcache ());
6005
6006 return inf_state;
6007 }
6008
6009 /* Restore inferior thread state to INF_STATE. */
6010
6011 void
6012 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6013 {
6014 struct thread_info *tp = inferior_thread ();
6015
6016 tp->stop_signal = inf_state->stop_signal;
6017 stop_pc = inf_state->stop_pc;
6018
6019 /* The inferior can be gone if the user types "print exit(0)"
6020 (and perhaps other times). */
6021 if (target_has_execution)
6022 /* NB: The register write goes through to the target. */
6023 regcache_cpy (get_current_regcache (), inf_state->registers);
6024 regcache_xfree (inf_state->registers);
6025 xfree (inf_state);
6026 }
6027
6028 static void
6029 do_restore_inferior_thread_state_cleanup (void *state)
6030 {
6031 restore_inferior_thread_state (state);
6032 }
6033
6034 struct cleanup *
6035 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6036 {
6037 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6038 }
6039
6040 void
6041 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6042 {
6043 regcache_xfree (inf_state->registers);
6044 xfree (inf_state);
6045 }
6046
6047 struct regcache *
6048 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6049 {
6050 return inf_state->registers;
6051 }
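
/* Usage sketch (illustrative, not taken verbatim from any caller): code
   that lets the inferior run temporarily and wants the thread's
   registers, stop pc and pending signal back afterwards brackets the
   operation like this:

     struct inferior_thread_state *saved = save_inferior_thread_state ();
     struct cleanup *back_to
       = make_cleanup_restore_inferior_thread_state (saved);

     ... let the inferior run ...

     do_cleanups (back_to);

   do_cleanups restores and frees the saved state; a caller that decides
   to keep the new state instead calls discard_cleanups followed by
   discard_inferior_thread_state.  */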
6052
6053 /* Session related state for inferior function calls.
6054 These are the additional bits of state that need to be restored
6055 when an inferior function call successfully completes. */
6056
6057 struct inferior_status
6058 {
6059 bpstat stop_bpstat;
6060 int stop_step;
6061 enum stop_stack_kind stop_stack_dummy;
6062 int stopped_by_random_signal;
6063 int stepping_over_breakpoint;
6064 CORE_ADDR step_range_start;
6065 CORE_ADDR step_range_end;
6066 struct frame_id step_frame_id;
6067 struct frame_id step_stack_frame_id;
6068 enum step_over_calls_kind step_over_calls;
6069 CORE_ADDR step_resume_break_address;
6070 int stop_after_trap;
6071 int stop_soon;
6072
6073 /* ID of the selected frame when the inferior function call was made. */
6074 struct frame_id selected_frame_id;
6075
6076 int proceed_to_finish;
6077 int in_infcall;
6078 };
6079
6080 /* Save all of the information associated with the inferior<==>gdb
6081 connection. */
6082
6083 struct inferior_status *
6084 save_inferior_status (void)
6085 {
6086 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6087 struct thread_info *tp = inferior_thread ();
6088 struct inferior *inf = current_inferior ();
6089
6090 inf_status->stop_step = tp->stop_step;
6091 inf_status->stop_stack_dummy = stop_stack_dummy;
6092 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6093 inf_status->stepping_over_breakpoint = tp->trap_expected;
6094 inf_status->step_range_start = tp->step_range_start;
6095 inf_status->step_range_end = tp->step_range_end;
6096 inf_status->step_frame_id = tp->step_frame_id;
6097 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6098 inf_status->step_over_calls = tp->step_over_calls;
6099 inf_status->stop_after_trap = stop_after_trap;
6100 inf_status->stop_soon = inf->stop_soon;
6101 /* Save original bpstat chain here; replace it with copy of chain.
6102 If caller's caller is walking the chain, they'll be happier if we
6103 hand them back the original chain when restore_inferior_status is
6104 called. */
6105 inf_status->stop_bpstat = tp->stop_bpstat;
6106 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6107 inf_status->proceed_to_finish = tp->proceed_to_finish;
6108 inf_status->in_infcall = tp->in_infcall;
6109
6110 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6111
6112 return inf_status;
6113 }
6114
6115 static int
6116 restore_selected_frame (void *args)
6117 {
6118 struct frame_id *fid = (struct frame_id *) args;
6119 struct frame_info *frame;
6120
6121 frame = frame_find_by_id (*fid);
6122
6123 /* If inf_status->selected_frame_id is NULL, there was no previously
6124 selected frame. */
6125 if (frame == NULL)
6126 {
6127 warning (_("Unable to restore previously selected frame."));
6128 return 0;
6129 }
6130
6131 select_frame (frame);
6132
6133 return (1);
6134 }
6135
6136 /* Restore inferior session state to INF_STATUS. */
6137
6138 void
6139 restore_inferior_status (struct inferior_status *inf_status)
6140 {
6141 struct thread_info *tp = inferior_thread ();
6142 struct inferior *inf = current_inferior ();
6143
6144 tp->stop_step = inf_status->stop_step;
6145 stop_stack_dummy = inf_status->stop_stack_dummy;
6146 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6147 tp->trap_expected = inf_status->stepping_over_breakpoint;
6148 tp->step_range_start = inf_status->step_range_start;
6149 tp->step_range_end = inf_status->step_range_end;
6150 tp->step_frame_id = inf_status->step_frame_id;
6151 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6152 tp->step_over_calls = inf_status->step_over_calls;
6153 stop_after_trap = inf_status->stop_after_trap;
6154 inf->stop_soon = inf_status->stop_soon;
6155 bpstat_clear (&tp->stop_bpstat);
6156 tp->stop_bpstat = inf_status->stop_bpstat;
6157 inf_status->stop_bpstat = NULL;
6158 tp->proceed_to_finish = inf_status->proceed_to_finish;
6159 tp->in_infcall = inf_status->in_infcall;
6160
6161 if (target_has_stack)
6162 {
6163 /* The point of catch_errors is that if the stack is clobbered,
6164 walking the stack might encounter a garbage pointer and
6165 error() trying to dereference it. */
6166 if (catch_errors
6167 (restore_selected_frame, &inf_status->selected_frame_id,
6168 "Unable to restore previously selected frame:\n",
6169 RETURN_MASK_ERROR) == 0)
6170 /* Error in restoring the selected frame. Select the innermost
6171 frame. */
6172 select_frame (get_current_frame ());
6173 }
6174
6175 xfree (inf_status);
6176 }
6177
6178 static void
6179 do_restore_inferior_status_cleanup (void *sts)
6180 {
6181 restore_inferior_status (sts);
6182 }
6183
6184 struct cleanup *
6185 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6186 {
6187 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6188 }
6189
6190 void
6191 discard_inferior_status (struct inferior_status *inf_status)
6192 {
6193 /* See save_inferior_status for info on stop_bpstat. */
6194 bpstat_clear (&inf_status->stop_bpstat);
6195 xfree (inf_status);
6196 }
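
/* Usage sketch (illustrative): the companion to the thread-state pair
   above, covering GDB-side stepping state and the stop_bpstat chain,
   e.g. around an inferior function call:

     struct inferior_status *inf_status = save_inferior_status ();
     struct cleanup *back_to
       = make_cleanup_restore_inferior_status (inf_status);

     ... proceed and wait for the call to finish ...

     do_cleanups (back_to);

   do_cleanups restores the stepping state and the original bpstat chain
   and frees INF_STATUS; a caller that wants to keep the post-call state
   instead calls discard_cleanups and then discard_inferior_status.  */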
6197 \f
6198 int
6199 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6200 {
6201 struct target_waitstatus last;
6202 ptid_t last_ptid;
6203
6204 get_last_target_status (&last_ptid, &last);
6205
6206 if (last.kind != TARGET_WAITKIND_FORKED)
6207 return 0;
6208
6209 if (!ptid_equal (last_ptid, pid))
6210 return 0;
6211
6212 *child_pid = last.value.related_pid;
6213 return 1;
6214 }
6215
6216 int
6217 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6218 {
6219 struct target_waitstatus last;
6220 ptid_t last_ptid;
6221
6222 get_last_target_status (&last_ptid, &last);
6223
6224 if (last.kind != TARGET_WAITKIND_VFORKED)
6225 return 0;
6226
6227 if (!ptid_equal (last_ptid, pid))
6228 return 0;
6229
6230 *child_pid = last.value.related_pid;
6231 return 1;
6232 }
6233
6234 int
6235 inferior_has_execd (ptid_t pid, char **execd_pathname)
6236 {
6237 struct target_waitstatus last;
6238 ptid_t last_ptid;
6239
6240 get_last_target_status (&last_ptid, &last);
6241
6242 if (last.kind != TARGET_WAITKIND_EXECD)
6243 return 0;
6244
6245 if (!ptid_equal (last_ptid, pid))
6246 return 0;
6247
6248 *execd_pathname = xstrdup (last.value.execd_pathname);
6249 return 1;
6250 }
6251
6252 int
6253 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6254 {
6255 struct target_waitstatus last;
6256 ptid_t last_ptid;
6257
6258 get_last_target_status (&last_ptid, &last);
6259
6260 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6261 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6262 return 0;
6263
6264 if (!ptid_equal (last_ptid, pid))
6265 return 0;
6266
6267 *syscall_number = last.value.syscall_number;
6268 return 1;
6269 }
6270
6271 /* Oft used ptids */
6272 ptid_t null_ptid;
6273 ptid_t minus_one_ptid;
6274
6275 /* Create a ptid given the necessary PID, LWP, and TID components. */
6276
6277 ptid_t
6278 ptid_build (int pid, long lwp, long tid)
6279 {
6280 ptid_t ptid;
6281
6282 ptid.pid = pid;
6283 ptid.lwp = lwp;
6284 ptid.tid = tid;
6285 return ptid;
6286 }
6287
6288 /* Create a ptid from just a pid. */
6289
6290 ptid_t
6291 pid_to_ptid (int pid)
6292 {
6293 return ptid_build (pid, 0, 0);
6294 }
6295
6296 /* Fetch the pid (process id) component from a ptid. */
6297
6298 int
6299 ptid_get_pid (ptid_t ptid)
6300 {
6301 return ptid.pid;
6302 }
6303
6304 /* Fetch the lwp (lightweight process) component from a ptid. */
6305
6306 long
6307 ptid_get_lwp (ptid_t ptid)
6308 {
6309 return ptid.lwp;
6310 }
6311
6312 /* Fetch the tid (thread id) component from a ptid. */
6313
6314 long
6315 ptid_get_tid (ptid_t ptid)
6316 {
6317 return ptid.tid;
6318 }
6319
6320 /* ptid_equal() is used to test equality of two ptids. */
6321
6322 int
6323 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6324 {
6325 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6326 && ptid1.tid == ptid2.tid);
6327 }
6328
6329 /* Returns true if PTID represents a process. */
6330
6331 int
6332 ptid_is_pid (ptid_t ptid)
6333 {
6334 if (ptid_equal (minus_one_ptid, ptid))
6335 return 0;
6336 if (ptid_equal (null_ptid, ptid))
6337 return 0;
6338
6339 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6340 }
6341
6342 int
6343 ptid_match (ptid_t ptid, ptid_t filter)
6344 {
6345 /* Since both parameters have the same type, prevent easy mistakes
6346 from happening. */
6347 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6348 && !ptid_equal (ptid, null_ptid));
6349
6350 if (ptid_equal (filter, minus_one_ptid))
6351 return 1;
6352 if (ptid_is_pid (filter)
6353 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6354 return 1;
6355 else if (ptid_equal (ptid, filter))
6356 return 1;
6357
6358 return 0;
6359 }
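
/* Illustrative examples of the matching rules above (the pid/lwp numbers
   are made up):

     ptid_t thread = ptid_build (1234, 5678, 0);

     ptid_match (thread, minus_one_ptid)            => 1  (wildcard)
     ptid_match (thread, pid_to_ptid (1234))        => 1  (whole process)
     ptid_match (thread, ptid_build (1234, 9, 0))   => 0  (different lwp)

   The first argument must name a specific thread; passing null_ptid or
   minus_one_ptid there trips the assertion.  */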
6360
6361 /* restore_inferior_ptid() will be used by the cleanup machinery
6362 to restore the inferior_ptid value saved in a call to
6363 save_inferior_ptid(). */
6364
6365 static void
6366 restore_inferior_ptid (void *arg)
6367 {
6368 ptid_t *saved_ptid_ptr = arg;
6369
6370 inferior_ptid = *saved_ptid_ptr;
6371 xfree (arg);
6372 }
6373
6374 /* Save the value of inferior_ptid so that it may be restored by a
6375 later call to do_cleanups(). Returns the struct cleanup pointer
6376 needed for later doing the cleanup. */
6377
6378 struct cleanup *
6379 save_inferior_ptid (void)
6380 {
6381 ptid_t *saved_ptid_ptr;
6382
6383 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6384 *saved_ptid_ptr = inferior_ptid;
6385 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6386 }
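
/* Usage sketch (illustrative; OTHER_PTID stands for whatever thread the
   caller wants to act on): the usual pattern for temporarily switching
   inferior_ptid is

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = other_ptid;
     ... operate on that thread ...

     do_cleanups (old_chain);

   The cleanup puts the original inferior_ptid back even if an error is
   thrown in between.  */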
6387 \f
6388
6389 /* User interface for reverse debugging:
6390 Set exec-direction / show exec-direction commands
6391 (the setting is ignored unless the target can execute in reverse). */
6392
6393 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6394 static const char exec_forward[] = "forward";
6395 static const char exec_reverse[] = "reverse";
6396 static const char *exec_direction = exec_forward;
6397 static const char *exec_direction_names[] = {
6398 exec_forward,
6399 exec_reverse,
6400 NULL
6401 };
6402
6403 static void
6404 set_exec_direction_func (char *args, int from_tty,
6405 struct cmd_list_element *cmd)
6406 {
6407 if (target_can_execute_reverse)
6408 {
6409 if (!strcmp (exec_direction, exec_forward))
6410 execution_direction = EXEC_FORWARD;
6411 else if (!strcmp (exec_direction, exec_reverse))
6412 execution_direction = EXEC_REVERSE;
6413 }
6414 }
6415
6416 static void
6417 show_exec_direction_func (struct ui_file *out, int from_tty,
6418 struct cmd_list_element *cmd, const char *value)
6419 {
6420 switch (execution_direction) {
6421 case EXEC_FORWARD:
6422 fprintf_filtered (out, _("Forward.\n"));
6423 break;
6424 case EXEC_REVERSE:
6425 fprintf_filtered (out, _("Reverse.\n"));
6426 break;
6427 case EXEC_ERROR:
6428 default:
6429 fprintf_filtered (out,
6430 _("Forward (target `%s' does not support exec-direction).\n"),
6431 target_shortname);
6432 break;
6433 }
6434 }
6435
6436 /* User interface for non-stop mode. */
6437
6438 int non_stop = 0;
6439 static int non_stop_1 = 0;
6440
6441 static void
6442 set_non_stop (char *args, int from_tty,
6443 struct cmd_list_element *c)
6444 {
6445 if (target_has_execution)
6446 {
6447 non_stop_1 = non_stop;
6448 error (_("Cannot change this setting while the inferior is running."));
6449 }
6450
6451 non_stop = non_stop_1;
6452 }
6453
6454 static void
6455 show_non_stop (struct ui_file *file, int from_tty,
6456 struct cmd_list_element *c, const char *value)
6457 {
6458 fprintf_filtered (file,
6459 _("Controlling the inferior in non-stop mode is %s.\n"),
6460 value);
6461 }
6462
6463 static void
6464 show_schedule_multiple (struct ui_file *file, int from_tty,
6465 struct cmd_list_element *c, const char *value)
6466 {
6467 fprintf_filtered (file, _("\
6468 Resuming the execution of threads of all processes is %s.\n"), value);
6469 }
6470
6471 void
6472 _initialize_infrun (void)
6473 {
6474 int i;
6475 int numsigs;
6476
6477 add_info ("signals", signals_info, _("\
6478 What debugger does when program gets various signals.\n\
6479 Specify a signal as argument to print info on that signal only."));
6480 add_info_alias ("handle", "signals", 0);
6481
6482 add_com ("handle", class_run, handle_command, _("\
6483 Specify how to handle a signal.\n\
6484 Args are signals and actions to apply to those signals.\n\
6485 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6486 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6487 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6488 The special arg \"all\" is recognized to mean all signals except those\n\
6489 used by the debugger, typically SIGTRAP and SIGINT.\n\
6490 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6491 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6492 Stop means reenter debugger if this signal happens (implies print).\n\
6493 Print means print a message if this signal happens.\n\
6494 Pass means let program see this signal; otherwise program doesn't know.\n\
6495 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6496 Pass and Stop may be combined."));
6497 if (xdb_commands)
6498 {
6499 add_com ("lz", class_info, signals_info, _("\
6500 What debugger does when program gets various signals.\n\
6501 Specify a signal as argument to print info on that signal only."));
6502 add_com ("z", class_run, xdb_handle_command, _("\
6503 Specify how to handle a signal.\n\
6504 Args are signals and actions to apply to those signals.\n\
6505 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6506 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6507 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6508 The special arg \"all\" is recognized to mean all signals except those\n\
6509 used by the debugger, typically SIGTRAP and SIGINT.\n\
6510 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6511 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6512 nopass), \"Q\" (noprint)\n\
6513 Stop means reenter debugger if this signal happens (implies print).\n\
6514 Print means print a message if this signal happens.\n\
6515 Pass means let program see this signal; otherwise program doesn't know.\n\
6516 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6517 Pass and Stop may be combined."));
6518 }
6519
6520 if (!dbx_commands)
6521 stop_command = add_cmd ("stop", class_obscure,
6522 not_just_help_class_command, _("\
6523 There is no `stop' command, but you can set a hook on `stop'.\n\
6524 This allows you to set a list of commands to be run each time execution\n\
6525 of the program stops."), &cmdlist);
6526
6527 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6528 Set inferior debugging."), _("\
6529 Show inferior debugging."), _("\
6530 When non-zero, inferior specific debugging is enabled."),
6531 NULL,
6532 show_debug_infrun,
6533 &setdebuglist, &showdebuglist);
6534
6535 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6536 Set displaced stepping debugging."), _("\
6537 Show displaced stepping debugging."), _("\
6538 When non-zero, displaced stepping specific debugging is enabled."),
6539 NULL,
6540 show_debug_displaced,
6541 &setdebuglist, &showdebuglist);
6542
6543 add_setshow_boolean_cmd ("non-stop", no_class,
6544 &non_stop_1, _("\
6545 Set whether gdb controls the inferior in non-stop mode."), _("\
6546 Show whether gdb controls the inferior in non-stop mode."), _("\
6547 When debugging a multi-threaded program and this setting is\n\
6548 off (the default, also called all-stop mode), when one thread stops\n\
6549 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6550 all other threads in the program while you interact with the thread of\n\
6551 interest. When you continue or step a thread, you can allow the other\n\
6552 threads to run, or have them remain stopped, but while you inspect any\n\
6553 thread's state, all threads stop.\n\
6554 \n\
6555 In non-stop mode, when one thread stops, other threads can continue\n\
6556 to run freely. You'll be able to step each thread independently,\n\
6557 leave it stopped or free to run as needed."),
6558 set_non_stop,
6559 show_non_stop,
6560 &setlist,
6561 &showlist);
6562
6563 numsigs = (int) TARGET_SIGNAL_LAST;
6564 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6565 signal_print = (unsigned char *)
6566 xmalloc (sizeof (signal_print[0]) * numsigs);
6567 signal_program = (unsigned char *)
6568 xmalloc (sizeof (signal_program[0]) * numsigs);
6569 for (i = 0; i < numsigs; i++)
6570 {
6571 signal_stop[i] = 1;
6572 signal_print[i] = 1;
6573 signal_program[i] = 1;
6574 }
6575
6576 /* Signals caused by debugger's own actions
6577 should not be given to the program afterwards. */
6578 signal_program[TARGET_SIGNAL_TRAP] = 0;
6579 signal_program[TARGET_SIGNAL_INT] = 0;
6580
6581 /* Signals that are not errors should not normally enter the debugger. */
6582 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6583 signal_print[TARGET_SIGNAL_ALRM] = 0;
6584 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6585 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6586 signal_stop[TARGET_SIGNAL_PROF] = 0;
6587 signal_print[TARGET_SIGNAL_PROF] = 0;
6588 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6589 signal_print[TARGET_SIGNAL_CHLD] = 0;
6590 signal_stop[TARGET_SIGNAL_IO] = 0;
6591 signal_print[TARGET_SIGNAL_IO] = 0;
6592 signal_stop[TARGET_SIGNAL_POLL] = 0;
6593 signal_print[TARGET_SIGNAL_POLL] = 0;
6594 signal_stop[TARGET_SIGNAL_URG] = 0;
6595 signal_print[TARGET_SIGNAL_URG] = 0;
6596 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6597 signal_print[TARGET_SIGNAL_WINCH] = 0;
6598
6599 /* These signals are used internally by user-level thread
6600 implementations. (See signal(5) on Solaris.) Like the above
6601 signals, a healthy program receives and handles them as part of
6602 its normal operation. */
6603 signal_stop[TARGET_SIGNAL_LWP] = 0;
6604 signal_print[TARGET_SIGNAL_LWP] = 0;
6605 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6606 signal_print[TARGET_SIGNAL_WAITING] = 0;
6607 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6608 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6609
6610 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6611 &stop_on_solib_events, _("\
6612 Set stopping for shared library events."), _("\
6613 Show stopping for shared library events."), _("\
6614 If nonzero, gdb will give control to the user when the dynamic linker\n\
6615 notifies gdb of shared library events. The most common event of interest\n\
6616 to the user would be loading/unloading of a new library."),
6617 NULL,
6618 show_stop_on_solib_events,
6619 &setlist, &showlist);
6620
6621 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6622 follow_fork_mode_kind_names,
6623 &follow_fork_mode_string, _("\
6624 Set debugger response to a program call of fork or vfork."), _("\
6625 Show debugger response to a program call of fork or vfork."), _("\
6626 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6627 parent - the original process is debugged after a fork\n\
6628 child - the new process is debugged after a fork\n\
6629 The unfollowed process will continue to run.\n\
6630 By default, the debugger will follow the parent process."),
6631 NULL,
6632 show_follow_fork_mode_string,
6633 &setlist, &showlist);
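/* Usage sketch (illustrative):
	(gdb) set follow-fork-mode child
	(gdb) show follow-fork-mode
   See also "set detach-on-fork" below for whether the unfollowed
   process stays under GDB's control.  */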
6634
6635 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6636 follow_exec_mode_names,
6637 &follow_exec_mode_string, _("\
6638 Set debugger response to a program call of exec."), _("\
6639 Show debugger response to a program call of exec."), _("\
6640 An exec call replaces the program image of a process.\n\
6641 \n\
6642 follow-exec-mode can be:\n\
6643 \n\
6644 new - the debugger creates a new inferior and rebinds the process\n\
6645 to this new inferior. The program the process was running before\n\
6646 the exec call can be restarted afterwards by restarting the original\n\
6647 inferior.\n\
6648 \n\
6649 same - the debugger keeps the process bound to the same inferior.\n\
6650 The new executable image replaces the previous executable loaded in\n\
6651 the inferior. Restarting the inferior after the exec call restarts\n\
6652 the executable the process was running after the exec call.\n\
6653 \n\
6654 By default, the debugger will use the same inferior."),
6655 NULL,
6656 show_follow_exec_mode_string,
6657 &setlist, &showlist);
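/* Usage sketch (illustrative):
	(gdb) set follow-exec-mode new
   keeps the pre-exec program available as a separate inferior, so the
   original executable can be restarted later.  */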
6658
6659 add_setshow_enum_cmd ("scheduler-locking", class_run,
6660 scheduler_enums, &scheduler_mode, _("\
6661 Set mode for locking scheduler during execution."), _("\
6662 Show mode for locking scheduler during execution."), _("\
6663 off == no locking (threads may preempt at any time)\n\
6664 on == full locking (no thread except the current thread may run)\n\
6665 step == scheduler locked during every single-step operation.\n\
6666 In this mode, no other thread may run during a step command.\n\
6667 Other threads may run while stepping over a function call ('next')."),
6668 set_schedlock_func, /* traps on target vector */
6669 show_scheduler_mode,
6670 &setlist, &showlist);
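/* Usage sketch (illustrative):
	(gdb) set scheduler-locking step
   lets only the current thread run during "step", while still allowing
   other threads to run across a "next" over a function call.  */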
6671
6672 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6673 Set mode for resuming threads of all processes."), _("\
6674 Show mode for resuming threads of all processes."), _("\
6675 When on, execution commands (such as 'continue' or 'next') resume all\n\
6676 threads of all processes. When off (which is the default), execution\n\
6677 commands only resume the threads of the current process. The set of\n\
6678 threads that are resumed is further refined by the scheduler-locking\n\
6679 mode (see help set scheduler-locking)."),
6680 NULL,
6681 show_schedule_multiple,
6682 &setlist, &showlist);
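/* Usage sketch (illustrative), relevant only when debugging more than
   one process at a time:
	(gdb) set schedule-multiple on
	(gdb) continue
   resumes the threads of every process, subject to scheduler-locking.  */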
6683
6684 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6685 Set mode of the step operation."), _("\
6686 Show mode of the step operation."), _("\
6687 When set, doing a step over a function without debug line information\n\
6688 will stop at the first instruction of that function. Otherwise, the\n\
6689 function is skipped and the step command stops at a different source line."),
6690 NULL,
6691 show_step_stop_if_no_debug,
6692 &setlist, &showlist);
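/* Usage sketch (illustrative):
	(gdb) set step-mode on
	(gdb) step
   stops at the first instruction of a function that has no line-number
   information instead of skipping over it.  */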
6693
6694 add_setshow_enum_cmd ("displaced-stepping", class_run,
6695 can_use_displaced_stepping_enum,
6696 &can_use_displaced_stepping, _("\
6697 Set debugger's willingness to use displaced stepping."), _("\
6698 Show debugger's willingness to use displaced stepping."), _("\
6699 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6700 supported by the target architecture. If off, gdb will not use displaced\n\
6701 stepping to step over breakpoints, even if it is supported by the target\n\
6702 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6703 if the target architecture supports it and non-stop mode is active, but will not\n\
6704 use it in all-stop mode (see help set non-stop)."),
6705 NULL,
6706 show_can_use_displaced_stepping,
6707 &setlist, &showlist);
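/* Usage sketch (illustrative):
	(gdb) set displaced-stepping on
   forces displaced stepping even in all-stop mode, provided the target
   architecture implements it; "auto" (the default) enables it only in
   non-stop mode.  */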
6708
6709 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6710 &exec_direction, _("Set direction of execution.\n\
6711 Options are 'forward' or 'reverse'."),
6712 _("Show direction of execution (forward/reverse)."),
6713 _("Tells gdb whether to execute forward or backward."),
6714 set_exec_direction_func, show_exec_direction_func,
6715 &setlist, &showlist);
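/* Usage sketch (illustrative); reverse execution needs a target that
   records execution, e.g. the process record target:
	(gdb) record
	(gdb) set exec-direction reverse
	(gdb) continue
   The "reverse-step" and "reverse-continue" commands are an
   alternative to flipping this setting.  */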
6716
6717 /* Set/show detach-on-fork: user-settable mode. */
6718
6719 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6720 Set whether gdb will detach the child of a fork."), _("\
6721 Show whether gdb will detach the child of a fork."), _("\
6722 Tells gdb whether to detach the child of a fork."),
6723 NULL, NULL, &setlist, &showlist);
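/* Usage sketch (illustrative):
	(gdb) set detach-on-fork off
   keeps both the parent and the child under GDB's control after a
   fork; the process that is not followed is retained, suspended, as
   another inferior.  */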
6724
6725 /* ptid initializations */
6726 null_ptid = ptid_build (0, 0, 0);
6727 minus_one_ptid = ptid_build (-1, 0, 0);
6728 inferior_ptid = null_ptid;
6729 target_last_wait_ptid = minus_one_ptid;
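/* Note: ptid_build (pid, lwp, tid) assembles a ptid from a process id,
   a kernel lwp id and a thread id.  null_ptid stands for "no thread";
   minus_one_ptid is the wildcard used e.g. by target_wait and
   target_resume to mean "any"/"all" threads.  */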
6730
6731 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6732 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6733 observer_attach_thread_exit (infrun_thread_thread_exit);
6734 observer_attach_inferior_exit (infrun_inferior_exit);
6735
6736 /* Explicitly create without lookup, since lookup would create the
6737 variable with a void-typed value, and when we get here, gdbarch
6738 isn't initialized yet.  At this point, we're quite sure there
6739 isn't another convenience variable of the same name. */
6740 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
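/* Usage sketch (illustrative): once the inferior has stopped for a
   signal, the lazily-created convenience variable can be inspected,
   e.g. on GNU/Linux:
	(gdb) print $_siginfo
	(gdb) print $_siginfo.si_signo
   Its layout comes from the current gdbarch, which is why creation is
   deferred here.  */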
6741 }