1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such functions. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
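/* Illustrative sketch (guarded out of the build) of how the two hooks
   described above might be consulted when a step lands in the dynamic
   linker.  The helper below and its caller are hypothetical, added only
   to illustrate the preceding comment; the real decisions are made in
   handle_inferior_event.  */
#if 0
static void
example_step_through_resolver (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  if (in_solib_dynsym_resolve_code (pc))
    {
      /* Ask the architecture whether it knows a spot past the
         resolver where a step-resume breakpoint could be planted.  */
      CORE_ADDR resolved = gdbarch_skip_solib_resolver (gdbarch, pc);

      if (resolved != 0)
        {
          /* Behave like "next" over a function call: run free until
             RESOLVED is reached.  (Breakpoint insertion elided.)  */
        }
      else
        {
          /* Fall back to single-stepping until the PC leaves the
             dynamic linker's code.  */
        }
    }
}
#endif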
180
181
182 /* Tables of how to react to signals; the user sets them. */
183
184 static unsigned char *signal_stop;
185 static unsigned char *signal_print;
186 static unsigned char *signal_program;
187
188 #define SET_SIGS(nsigs,sigs,flags) \
189 do { \
190 int signum = (nsigs); \
191 while (signum-- > 0) \
192 if ((sigs)[signum]) \
193 (flags)[signum] = 1; \
194 } while (0)
195
196 #define UNSET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 0; \
202 } while (0)
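/* Example only (not built): how the macros above are typically folded
   into the tables, given a scratch array SIGS marking which signal
   numbers a "handle" command named.  The helper is hypothetical.  */
#if 0
static void
example_apply_handle_flags (const unsigned char *sigs, int nsigs,
                            int stop, int print)
{
  if (stop)
    SET_SIGS (nsigs, sigs, signal_stop);
  else
    UNSET_SIGS (nsigs, sigs, signal_stop);

  if (print)
    SET_SIGS (nsigs, sigs, signal_print);
  else
    UNSET_SIGS (nsigs, sigs, signal_print);
}
#endif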
203
204 /* Value to pass to target_resume() to cause all threads to resume */
205
206 #define RESUME_ALL minus_one_ptid
207
208 /* Command list pointer for the "stop" placeholder. */
209
210 static struct cmd_list_element *stop_command;
211
212 /* Function inferior was in as of last step command. */
213
214 static struct symbol *step_start_function;
215
216 /* Nonzero if we want to give control to the user when we're notified
217 of shared library events by the dynamic linker. */
218 static int stop_on_solib_events;
219 static void
220 show_stop_on_solib_events (struct ui_file *file, int from_tty,
221 struct cmd_list_element *c, const char *value)
222 {
223 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
224 value);
225 }
226
227 /* Nonzero means we are expecting a trace trap
228 and should stop the inferior and return silently when it happens. */
229
230 int stop_after_trap;
231
232 /* Save register contents here when executing a "finish" command or when
233 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
234 Thus this contains the return value from the called function (assuming
235 values are returned in a register). */
236
237 struct regcache *stop_registers;
238
239 /* Nonzero after stop if current stack frame should be printed. */
240
241 static int stop_print_frame;
242
243 /* This is a cached copy of the pid/waitstatus of the last event
244 returned by target_wait()/deprecated_target_wait_hook(). This
245 information is returned by get_last_target_status(). */
246 static ptid_t target_last_wait_ptid;
247 static struct target_waitstatus target_last_waitstatus;
248
249 static void context_switch (ptid_t ptid);
250
251 void init_thread_stepping_state (struct thread_info *tss);
252
253 void init_infwait_state (void);
254
255 static const char follow_fork_mode_child[] = "child";
256 static const char follow_fork_mode_parent[] = "parent";
257
258 static const char *follow_fork_mode_kind_names[] = {
259 follow_fork_mode_child,
260 follow_fork_mode_parent,
261 NULL
262 };
263
264 static const char *follow_fork_mode_string = follow_fork_mode_parent;
265 static void
266 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
267 struct cmd_list_element *c, const char *value)
268 {
269 fprintf_filtered (file, _("\
270 Debugger response to a program call of fork or vfork is \"%s\".\n"),
271 value);
272 }
273 \f
274
275 /* Tell the target to follow the fork we're stopped at. Returns true
276 if the inferior should be resumed; false, if the target for some
277 reason decided it's best not to resume. */
278
279 static int
280 follow_fork (void)
281 {
282 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
283 int should_resume = 1;
284 struct thread_info *tp;
285
286 /* Copy user stepping state to the new inferior thread. FIXME: the
287 followed fork child thread should have a copy of most of the
288 parent thread structure's run control related fields, not just these.
289 Initialized to avoid "may be used uninitialized" warnings from gcc. */
290 struct breakpoint *step_resume_breakpoint = NULL;
291 CORE_ADDR step_range_start = 0;
292 CORE_ADDR step_range_end = 0;
293 struct frame_id step_frame_id = { 0 };
294
295 if (!non_stop)
296 {
297 ptid_t wait_ptid;
298 struct target_waitstatus wait_status;
299
300 /* Get the last target status returned by target_wait(). */
301 get_last_target_status (&wait_ptid, &wait_status);
302
303 /* If not stopped at a fork event, then there's nothing else to
304 do. */
305 if (wait_status.kind != TARGET_WAITKIND_FORKED
306 && wait_status.kind != TARGET_WAITKIND_VFORKED)
307 return 1;
308
309 /* Check if we switched over from WAIT_PTID, since the event was
310 reported. */
311 if (!ptid_equal (wait_ptid, minus_one_ptid)
312 && !ptid_equal (inferior_ptid, wait_ptid))
313 {
314 /* We did. Switch back to WAIT_PTID thread, to tell the
315 target to follow it (in either direction). We'll
316 afterwards refuse to resume, and inform the user what
317 happened. */
318 switch_to_thread (wait_ptid);
319 should_resume = 0;
320 }
321 }
322
323 tp = inferior_thread ();
324
325 /* If there were any forks/vforks that were caught and are now to be
326 followed, then do so now. */
327 switch (tp->pending_follow.kind)
328 {
329 case TARGET_WAITKIND_FORKED:
330 case TARGET_WAITKIND_VFORKED:
331 {
332 ptid_t parent, child;
333
334 /* If the user did a next/step, etc, over a fork call,
335 preserve the stepping state in the fork child. */
336 if (follow_child && should_resume)
337 {
338 step_resume_breakpoint
339 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
340 step_range_start = tp->step_range_start;
341 step_range_end = tp->step_range_end;
342 step_frame_id = tp->step_frame_id;
343
344 /* For now, delete the parent's sr breakpoint, otherwise,
345 parent/child sr breakpoints are considered duplicates,
346 and the child version will not be installed. Remove
347 this when the breakpoints module becomes aware of
348 inferiors and address spaces. */
349 delete_step_resume_breakpoint (tp);
350 tp->step_range_start = 0;
351 tp->step_range_end = 0;
352 tp->step_frame_id = null_frame_id;
353 }
354
355 parent = inferior_ptid;
356 child = tp->pending_follow.value.related_pid;
357
358 /* Tell the target to do whatever is necessary to follow
359 either parent or child. */
360 if (target_follow_fork (follow_child))
361 {
362 /* Target refused to follow, or there's some other reason
363 we shouldn't resume. */
364 should_resume = 0;
365 }
366 else
367 {
368 /* This pending follow fork event is now handled, one way
369 or another. The previously selected thread may be gone
370 from the lists by now, but if it is still around, we need
371 to clear the pending follow request. */
372 tp = find_thread_ptid (parent);
373 if (tp)
374 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
375
376 /* This makes sure we don't try to apply the "Switched
377 over from WAIT_PTID" logic above. */
378 nullify_last_target_wait_ptid ();
379
380 /* If we followed the child, switch to it... */
381 if (follow_child)
382 {
383 switch_to_thread (child);
384
385 /* ... and preserve the stepping state, in case the
386 user was stepping over the fork call. */
387 if (should_resume)
388 {
389 tp = inferior_thread ();
390 tp->step_resume_breakpoint = step_resume_breakpoint;
391 tp->step_range_start = step_range_start;
392 tp->step_range_end = step_range_end;
393 tp->step_frame_id = step_frame_id;
394 }
395 else
396 {
397 /* If we get here, it was because we're trying to
398 resume from a fork catchpoint, but, the user
399 has switched threads away from the thread that
400 forked. In that case, the resume command
401 issued is most likely not applicable to the
402 child, so just warn, and refuse to resume. */
403 warning (_("\
404 Not resuming: switched threads before following fork child.\n"));
405 }
406
407 /* Reset breakpoints in the child as appropriate. */
408 follow_inferior_reset_breakpoints ();
409 }
410 else
411 switch_to_thread (parent);
412 }
413 }
414 break;
415 case TARGET_WAITKIND_SPURIOUS:
416 /* Nothing to follow. */
417 break;
418 default:
419 internal_error (__FILE__, __LINE__,
420 "Unexpected pending_follow.kind %d\n",
421 tp->pending_follow.kind);
422 break;
423 }
424
425 return should_resume;
426 }
427
428 void
429 follow_inferior_reset_breakpoints (void)
430 {
431 struct thread_info *tp = inferior_thread ();
432
433 /* Was there a step_resume breakpoint? (There was if the user
434 did a "next" at the fork() call.) If so, explicitly reset its
435 thread number.
436
437 step_resumes are a form of bp that are made to be per-thread.
438 Since we created the step_resume bp when the parent process
439 was being debugged, and now are switching to the child process,
440 from the breakpoint package's viewpoint, that's a switch of
441 "threads". We must update the bp's notion of which thread
442 it is for, or it'll be ignored when it triggers. */
443
444 if (tp->step_resume_breakpoint)
445 breakpoint_re_set_thread (tp->step_resume_breakpoint);
446
447 /* Reinsert all breakpoints in the child. The user may have set
448 breakpoints after catching the fork, in which case those
449 were never set in the child, but only in the parent. This makes
450 sure the inserted breakpoints match the breakpoint list. */
451
452 breakpoint_re_set ();
453 insert_breakpoints ();
454 }
455
456 /* The child has exited or execed: resume threads of the parent the
457 user wanted to be executing. */
458
459 static int
460 proceed_after_vfork_done (struct thread_info *thread,
461 void *arg)
462 {
463 int pid = * (int *) arg;
464
465 if (ptid_get_pid (thread->ptid) == pid
466 && is_running (thread->ptid)
467 && !is_executing (thread->ptid)
468 && !thread->stop_requested
469 && thread->stop_signal == TARGET_SIGNAL_0)
470 {
471 if (debug_infrun)
472 fprintf_unfiltered (gdb_stdlog,
473 "infrun: resuming vfork parent thread %s\n",
474 target_pid_to_str (thread->ptid));
475
476 switch_to_thread (thread->ptid);
477 clear_proceed_status ();
478 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
479 }
480
481 return 0;
482 }
483
484 /* Called whenever we notice an exec or exit event, to handle
485 detaching or resuming a vfork parent. */
486
487 static void
488 handle_vfork_child_exec_or_exit (int exec)
489 {
490 struct inferior *inf = current_inferior ();
491
492 if (inf->vfork_parent)
493 {
494 int resume_parent = -1;
495
496 /* This exec or exit marks the end of the shared memory region
497 between the parent and the child. If the user wanted to
498 detach from the parent, now is the time. */
499
500 if (inf->vfork_parent->pending_detach)
501 {
502 struct thread_info *tp;
503 struct cleanup *old_chain;
504 struct program_space *pspace;
505 struct address_space *aspace;
506
507 /* follow-fork child, detach-on-fork on */
508
509 old_chain = make_cleanup_restore_current_thread ();
510
511 /* We're letting go of the parent. */
512 tp = any_live_thread_of_process (inf->vfork_parent->pid);
513 switch_to_thread (tp->ptid);
514
515 /* We're about to detach from the parent, which implicitly
516 removes breakpoints from its address space. There's a
517 catch here: we want to reuse the spaces for the child,
518 but, parent/child are still sharing the pspace at this
519 point, although the exec in reality makes the kernel give
520 the child a fresh set of new pages. The problem here is
521 that the breakpoints module, being unaware of this, would
522 likely choose the child process to write to the parent
523 address space. Swapping the child temporarily away from
524 the spaces has the desired effect. Yes, this is "sort
525 of" a hack. */
526
527 pspace = inf->pspace;
528 aspace = inf->aspace;
529 inf->aspace = NULL;
530 inf->pspace = NULL;
531
532 if (debug_infrun || info_verbose)
533 {
534 target_terminal_ours ();
535
536 if (exec)
537 fprintf_filtered (gdb_stdlog,
538 "Detaching vfork parent process %d after child exec.\n",
539 inf->vfork_parent->pid);
540 else
541 fprintf_filtered (gdb_stdlog,
542 "Detaching vfork parent process %d after child exit.\n",
543 inf->vfork_parent->pid);
544 }
545
546 target_detach (NULL, 0);
547
548 /* Put it back. */
549 inf->pspace = pspace;
550 inf->aspace = aspace;
551
552 do_cleanups (old_chain);
553 }
554 else if (exec)
555 {
556 /* We're staying attached to the parent, so, really give the
557 child a new address space. */
558 inf->pspace = add_program_space (maybe_new_address_space ());
559 inf->aspace = inf->pspace->aspace;
560 inf->removable = 1;
561 set_current_program_space (inf->pspace);
562
563 resume_parent = inf->vfork_parent->pid;
564
565 /* Break the bonds. */
566 inf->vfork_parent->vfork_child = NULL;
567 }
568 else
569 {
570 struct cleanup *old_chain;
571 struct program_space *pspace;
572
573 /* If this is a vfork child exiting, then the pspace and
574 aspaces were shared with the parent. Since we're
575 reporting the process exit, we'll be mourning all that is
576 found in the address space, and switching to null_ptid,
577 preparing to start a new inferior. But, since we don't
578 want to clobber the parent's address/program spaces, we
579 go ahead and create a new one for this exiting
580 inferior. */
581
582 /* Switch to null_ptid, so that clone_program_space doesn't want
583 to read the selected frame of a dead process. */
584 old_chain = save_inferior_ptid ();
585 inferior_ptid = null_ptid;
586
587 /* This inferior is dead, so avoid giving the breakpoints
588 module the option to write through to it (cloning a
589 program space resets breakpoints). */
590 inf->aspace = NULL;
591 inf->pspace = NULL;
592 pspace = add_program_space (maybe_new_address_space ());
593 set_current_program_space (pspace);
594 inf->removable = 1;
595 clone_program_space (pspace, inf->vfork_parent->pspace);
596 inf->pspace = pspace;
597 inf->aspace = pspace->aspace;
598
599 /* Put back inferior_ptid. We'll continue mourning this
600 inferior. */
601 do_cleanups (old_chain);
602
603 resume_parent = inf->vfork_parent->pid;
604 /* Break the bonds. */
605 inf->vfork_parent->vfork_child = NULL;
606 }
607
608 inf->vfork_parent = NULL;
609
610 gdb_assert (current_program_space == inf->pspace);
611
612 if (non_stop && resume_parent != -1)
613 {
614 /* If the user wanted the parent to be running, let it go
615 free now. */
616 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
617
618 if (debug_infrun)
619 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
620 resume_parent);
621
622 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
623
624 do_cleanups (old_chain);
625 }
626 }
627 }
628
629 /* Enum strings for "set|show follow-exec-mode". */
630
631 static const char follow_exec_mode_new[] = "new";
632 static const char follow_exec_mode_same[] = "same";
633 static const char *follow_exec_mode_names[] =
634 {
635 follow_exec_mode_new,
636 follow_exec_mode_same,
637 NULL,
638 };
639
640 static const char *follow_exec_mode_string = follow_exec_mode_same;
641 static void
642 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
643 struct cmd_list_element *c, const char *value)
644 {
645 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
646 }
647
648 /* EXECD_PATHNAME is assumed to be non-NULL. */
649
650 static void
651 follow_exec (ptid_t pid, char *execd_pathname)
652 {
653 struct thread_info *th = inferior_thread ();
654 struct inferior *inf = current_inferior ();
655
656 /* This is an exec event that we actually wish to pay attention to.
657 Refresh our symbol table to the newly exec'd program, remove any
658 momentary bp's, etc.
659
660 If there are breakpoints, they aren't really inserted now,
661 since the exec() transformed our inferior into a fresh set
662 of instructions.
663
664 We want to preserve symbolic breakpoints on the list, since
665 we have hopes that they can be reset after the new a.out's
666 symbol table is read.
667
668 However, any "raw" breakpoints must be removed from the list
669 (e.g., the solib bp's), since their address is probably invalid
670 now.
671
672 And, we DON'T want to call delete_breakpoints() here, since
673 that may write the bp's "shadow contents" (the instruction
674 value that was overwritten with a TRAP instruction). Since
675 we now have a new a.out, those shadow contents aren't valid. */
676
677 mark_breakpoints_out ();
678
679 update_breakpoints_after_exec ();
680
681 /* If there was one, it's gone now. We cannot truly step-to-next
682 statement through an exec(). */
683 th->step_resume_breakpoint = NULL;
684 th->step_range_start = 0;
685 th->step_range_end = 0;
686
687 /* The target reports the exec event to the main thread, even if
688 some other thread does the exec, and even if the main thread was
689 already stopped --- if debugging in non-stop mode, it's possible
690 the user had the main thread held stopped in the previous image
691 --- release it now. This is the same behavior as step-over-exec
692 with scheduler-locking on in all-stop mode. */
693 th->stop_requested = 0;
694
695 /* What is this a.out's name? */
696 printf_unfiltered (_("%s is executing new program: %s\n"),
697 target_pid_to_str (inferior_ptid),
698 execd_pathname);
699
700 /* We've followed the inferior through an exec. Therefore, the
701 inferior has essentially been killed & reborn. */
702
703 gdb_flush (gdb_stdout);
704
705 breakpoint_init_inferior (inf_execd);
706
707 if (gdb_sysroot && *gdb_sysroot)
708 {
709 char *name = alloca (strlen (gdb_sysroot)
710 + strlen (execd_pathname)
711 + 1);
712 strcpy (name, gdb_sysroot);
713 strcat (name, execd_pathname);
714 execd_pathname = name;
715 }
716
717 /* Reset the shared library package. This ensures that we get a
718 shlib event when the child reaches "_start", at which point the
719 dld will have had a chance to initialize the child. */
720 /* Also, loading a symbol file below may trigger symbol lookups, and
721 we don't want those to be satisfied by the libraries of the
722 previous incarnation of this process. */
723 no_shared_libraries (NULL, 0);
724
725 if (follow_exec_mode_string == follow_exec_mode_new)
726 {
727 struct program_space *pspace;
728
729 /* The user wants to keep the old inferior and program spaces
730 around. Create a new fresh one, and switch to it. */
731
732 inf = add_inferior (current_inferior ()->pid);
733 pspace = add_program_space (maybe_new_address_space ());
734 inf->pspace = pspace;
735 inf->aspace = pspace->aspace;
736
737 exit_inferior_num_silent (current_inferior ()->num);
738
739 set_current_inferior (inf);
740 set_current_program_space (pspace);
741 }
742
743 gdb_assert (current_program_space == inf->pspace);
744
745 /* That a.out is now the one to use. */
746 exec_file_attach (execd_pathname, 0);
747
748 /* Load the main file's symbols. */
749 symbol_file_add_main (execd_pathname, 0);
750
751 #ifdef SOLIB_CREATE_INFERIOR_HOOK
752 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
753 #else
754 solib_create_inferior_hook (0);
755 #endif
756
757 jit_inferior_created_hook ();
758
759 /* Reinsert all breakpoints. (Those which were symbolic have
760 been reset to the proper address in the new a.out, thanks
761 to symbol_file_command...) */
762 insert_breakpoints ();
763
764 /* The next resume of this inferior should bring it to the shlib
765 startup breakpoints. (If the user had also set bp's on
766 "main" from the old (parent) process, then they'll auto-
767 matically get reset there in the new process.) */
768 }
769
770 /* Non-zero if we are just simulating a single-step. This is needed
771 because we cannot remove the breakpoints in the inferior process
772 until after the `wait' in `wait_for_inferior'. */
773 static int singlestep_breakpoints_inserted_p = 0;
774
775 /* The thread we inserted single-step breakpoints for. */
776 static ptid_t singlestep_ptid;
777
778 /* PC when we started this single-step. */
779 static CORE_ADDR singlestep_pc;
780
781 /* If another thread hit the singlestep breakpoint, we save the original
782 thread here so that we can resume single-stepping it later. */
783 static ptid_t saved_singlestep_ptid;
784 static int stepping_past_singlestep_breakpoint;
785
786 /* If not equal to null_ptid, this means that after the step over a
787 breakpoint is finished, we need to switch to deferred_step_ptid and step it.
788
789 The use case is when one thread has hit a breakpoint, and then the user
790 has switched to another thread and issued 'step'. We need to step over
791 the breakpoint in the thread which hit the breakpoint, but then continue
792 stepping the thread the user has selected. */
793 static ptid_t deferred_step_ptid;
794 \f
795 /* Displaced stepping. */
796
797 /* In non-stop debugging mode, we must take special care to manage
798 breakpoints properly; in particular, the traditional strategy for
799 stepping a thread past a breakpoint it has hit is unsuitable.
800 'Displaced stepping' is a tactic for stepping one thread past a
801 breakpoint it has hit while ensuring that other threads running
802 concurrently will hit the breakpoint as they should.
803
804 The traditional way to step a thread T off a breakpoint in a
805 multi-threaded program in all-stop mode is as follows:
806
807 a0) Initially, all threads are stopped, and breakpoints are not
808 inserted.
809 a1) We single-step T, leaving breakpoints uninserted.
810 a2) We insert breakpoints, and resume all threads.
811
812 In non-stop debugging, however, this strategy is unsuitable: we
813 don't want to have to stop all threads in the system in order to
814 continue or step T past a breakpoint. Instead, we use displaced
815 stepping:
816
817 n0) Initially, T is stopped, other threads are running, and
818 breakpoints are inserted.
819 n1) We copy the instruction "under" the breakpoint to a separate
820 location, outside the main code stream, making any adjustments
821 to the instruction, register, and memory state as directed by
822 T's architecture.
823 n2) We single-step T over the instruction at its new location.
824 n3) We adjust the resulting register and memory state as directed
825 by T's architecture. This includes resetting T's PC to point
826 back into the main instruction stream.
827 n4) We resume T.
828
829 This approach depends on the following gdbarch methods:
830
831 - gdbarch_max_insn_length and gdbarch_displaced_step_location
832 indicate where to copy the instruction, and how much space must
833 be reserved there. We use these in step n1.
834
835 - gdbarch_displaced_step_copy_insn copies an instruction to a new
836 address, and makes any necessary adjustments to the instruction,
837 register contents, and memory. We use this in step n1.
838
839 - gdbarch_displaced_step_fixup adjusts registers and memory after
840 we have successfully single-stepped the instruction, to yield the
841 same effect the instruction would have had if we had executed it
842 at its original address. We use this in step n3.
843
844 - gdbarch_displaced_step_free_closure provides cleanup.
845
846 The gdbarch_displaced_step_copy_insn and
847 gdbarch_displaced_step_fixup functions must be written so that
848 copying an instruction with gdbarch_displaced_step_copy_insn,
849 single-stepping across the copied instruction, and then applying
850 gdbarch_displaced_step_fixup should have the same effects on the
851 thread's memory and registers as stepping the instruction in place
852 would have. Exactly which responsibilities fall to the copy and
853 which fall to the fixup is up to the author of those functions.
854
855 See the comments in gdbarch.sh for details.
856
857 Note that displaced stepping and software single-step cannot
858 currently be used in combination, although with some care I think
859 they could be made to. Software single-step works by placing
860 breakpoints on all possible subsequent instructions; if the
861 displaced instruction is a PC-relative jump, those breakpoints
862 could fall in very strange places --- on pages that aren't
863 executable, or at addresses that are not proper instruction
864 boundaries. (We do generally let other threads run while we wait
865 to hit the software single-step breakpoint, and they might
866 encounter such a corrupted instruction.) One way to work around
867 this would be to have gdbarch_displaced_step_copy_insn fully
868 simulate the effect of PC-relative instructions (and return NULL)
869 on architectures that use software single-stepping.
870
871 In non-stop mode, we can have independent and simultaneous step
872 requests, so more than one thread may need to simultaneously step
873 over a breakpoint. The current implementation assumes there is
874 only one scratch space per process. In this case, we have to
875 serialize access to the scratch space. If thread A wants to step
876 over a breakpoint, but we are currently waiting for some other
877 thread to complete a displaced step, we leave thread A stopped and
878 place it in the displaced_step_request_queue. Whenever a displaced
879 step finishes, we pick the next thread in the queue and start a new
880 displaced step operation on it. See displaced_step_prepare and
881 displaced_step_fixup for details. */
882
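/* A condensed, illustrative walk-through of steps n1-n4 above using the
   gdbarch hooks just listed.  This sketch is guarded out of the build
   and omits queueing, error handling, and thread bookkeeping; the real
   implementation is displaced_step_prepare and displaced_step_fixup
   below.  */
#if 0
static void
example_displaced_step_once (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);	/* n1 */
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  read_memory (copy, saved, len);	/* Save the scratch area.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);
  regcache_write_pc (regcache, copy);

  /* n2: single-step the thread and wait for it to stop (elided).  */

  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                regcache);			/* n3 */
  write_memory (copy, saved, len);	/* Restore the scratch area.  */
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  xfree (saved);

  /* n4: resume the thread at its fixed-up original PC (elided).  */
}
#endif
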
883 struct displaced_step_request
884 {
885 ptid_t ptid;
886 struct displaced_step_request *next;
887 };
888
889 /* Per-inferior displaced stepping state. */
890 struct displaced_step_inferior_state
891 {
892 /* Pointer to next in linked list. */
893 struct displaced_step_inferior_state *next;
894
895 /* The process this displaced step state refers to. */
896 int pid;
897
898 /* A queue of pending displaced stepping requests. One entry per
899 thread that needs to do a displaced step. */
900 struct displaced_step_request *step_request_queue;
901
902 /* If this is not null_ptid, this is the thread carrying out a
903 displaced single-step in process PID. This thread's state will
904 require fixing up once it has completed its step. */
905 ptid_t step_ptid;
906
907 /* The architecture the thread had when we stepped it. */
908 struct gdbarch *step_gdbarch;
909
910 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
911 for post-step cleanup. */
912 struct displaced_step_closure *step_closure;
913
914 /* The address of the original instruction, and the copy we
915 made. */
916 CORE_ADDR step_original, step_copy;
917
918 /* Saved contents of copy area. */
919 gdb_byte *step_saved_copy;
920 };
921
922 /* The list of states of processes involved in displaced stepping
923 presently. */
924 static struct displaced_step_inferior_state *displaced_step_inferior_states;
925
926 /* Get the displaced stepping state of process PID. */
927
928 static struct displaced_step_inferior_state *
929 get_displaced_stepping_state (int pid)
930 {
931 struct displaced_step_inferior_state *state;
932
933 for (state = displaced_step_inferior_states;
934 state != NULL;
935 state = state->next)
936 if (state->pid == pid)
937 return state;
938
939 return NULL;
940 }
941
942 /* Add a new displaced stepping state for process PID to the displaced
943 stepping state list, or return a pointer to an existing entry
944 if one already exists. Never returns NULL. */
945
946 static struct displaced_step_inferior_state *
947 add_displaced_stepping_state (int pid)
948 {
949 struct displaced_step_inferior_state *state;
950
951 for (state = displaced_step_inferior_states;
952 state != NULL;
953 state = state->next)
954 if (state->pid == pid)
955 return state;
956
957 state = xcalloc (1, sizeof (*state));
958 state->pid = pid;
959 state->next = displaced_step_inferior_states;
960 displaced_step_inferior_states = state;
961
962 return state;
963 }
964
965 /* Remove the displaced stepping state of process PID. */
966
967 static void
968 remove_displaced_stepping_state (int pid)
969 {
970 struct displaced_step_inferior_state *it, **prev_next_p;
971
972 gdb_assert (pid != 0);
973
974 it = displaced_step_inferior_states;
975 prev_next_p = &displaced_step_inferior_states;
976 while (it)
977 {
978 if (it->pid == pid)
979 {
980 *prev_next_p = it->next;
981 xfree (it);
982 return;
983 }
984
985 prev_next_p = &it->next;
986 it = *prev_next_p;
987 }
988 }
989
990 static void
991 infrun_inferior_exit (struct inferior *inf)
992 {
993 remove_displaced_stepping_state (inf->pid);
994 }
995
996 /* Enum strings for "set|show displaced-stepping". */
997
998 static const char can_use_displaced_stepping_auto[] = "auto";
999 static const char can_use_displaced_stepping_on[] = "on";
1000 static const char can_use_displaced_stepping_off[] = "off";
1001 static const char *can_use_displaced_stepping_enum[] =
1002 {
1003 can_use_displaced_stepping_auto,
1004 can_use_displaced_stepping_on,
1005 can_use_displaced_stepping_off,
1006 NULL,
1007 };
1008
1009 /* If ON, and the architecture supports it, GDB will use displaced
1010 stepping to step over breakpoints. If OFF, or if the architecture
1011 doesn't support it, GDB will instead use the traditional
1012 hold-and-step approach. If AUTO (which is the default), GDB will
1013 decide which technique to use to step over breakpoints depending on
1014 which of all-stop or non-stop mode is active --- displaced stepping
1015 in non-stop mode; hold-and-step in all-stop mode. */
1016
1017 static const char *can_use_displaced_stepping =
1018 can_use_displaced_stepping_auto;
1019
1020 static void
1021 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1022 struct cmd_list_element *c,
1023 const char *value)
1024 {
1025 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1026 fprintf_filtered (file, _("\
1027 Debugger's willingness to use displaced stepping to step over \
1028 breakpoints is %s (currently %s).\n"),
1029 value, non_stop ? "on" : "off");
1030 else
1031 fprintf_filtered (file, _("\
1032 Debugger's willingness to use displaced stepping to step over \
1033 breakpoints is %s.\n"), value);
1034 }
1035
1036 /* Return non-zero if displaced stepping can/should be used to step
1037 over breakpoints. */
1038
1039 static int
1040 use_displaced_stepping (struct gdbarch *gdbarch)
1041 {
1042 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1043 && non_stop)
1044 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1045 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1046 && !RECORD_IS_USED);
1047 }
1048
1049 /* Clean out any stray displaced stepping state. */
1050 static void
1051 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1052 {
1053 /* Indicate that there is no cleanup pending. */
1054 displaced->step_ptid = null_ptid;
1055
1056 if (displaced->step_closure)
1057 {
1058 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1059 displaced->step_closure);
1060 displaced->step_closure = NULL;
1061 }
1062 }
1063
1064 static void
1065 displaced_step_clear_cleanup (void *arg)
1066 {
1067 struct displaced_step_inferior_state *state = arg;
1068
1069 displaced_step_clear (state);
1070 }
1071
1072 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1073 void
1074 displaced_step_dump_bytes (struct ui_file *file,
1075 const gdb_byte *buf,
1076 size_t len)
1077 {
1078 int i;
1079
1080 for (i = 0; i < len; i++)
1081 fprintf_unfiltered (file, "%02x ", buf[i]);
1082 fputs_unfiltered ("\n", file);
1083 }
1084
1085 /* Prepare to single-step, using displaced stepping.
1086
1087 Note that we cannot use displaced stepping when we have a signal to
1088 deliver. If we have a signal to deliver and an instruction to step
1089 over, then after the step, there will be no indication from the
1090 target whether the thread entered a signal handler or ignored the
1091 signal and stepped over the instruction successfully --- both cases
1092 result in a simple SIGTRAP. In the first case we mustn't do a
1093 fixup, and in the second case we must --- but we can't tell which.
1094 Comments in the code for 'random signals' in handle_inferior_event
1095 explain how we handle this case instead.
1096
1097 Returns 1 if preparing was successful -- this thread is going to be
1098 stepped now; or 0 if displaced stepping this thread got queued. */
1099 static int
1100 displaced_step_prepare (ptid_t ptid)
1101 {
1102 struct cleanup *old_cleanups, *ignore_cleanups;
1103 struct regcache *regcache = get_thread_regcache (ptid);
1104 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1105 CORE_ADDR original, copy;
1106 ULONGEST len;
1107 struct displaced_step_closure *closure;
1108 struct displaced_step_inferior_state *displaced;
1109
1110 /* We should never reach this function if the architecture does not
1111 support displaced stepping. */
1112 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1113
1114 /* We have to displaced step one thread at a time, as we only have
1115 access to a single scratch space per inferior. */
1116
1117 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1118
1119 if (!ptid_equal (displaced->step_ptid, null_ptid))
1120 {
1121 /* Already waiting for a displaced step to finish. Defer this
1122 request and place it in the queue. */
1123 struct displaced_step_request *req, *new_req;
1124
1125 if (debug_displaced)
1126 fprintf_unfiltered (gdb_stdlog,
1127 "displaced: defering step of %s\n",
1128 target_pid_to_str (ptid));
1129
1130 new_req = xmalloc (sizeof (*new_req));
1131 new_req->ptid = ptid;
1132 new_req->next = NULL;
1133
1134 if (displaced->step_request_queue)
1135 {
1136 for (req = displaced->step_request_queue;
1137 req && req->next;
1138 req = req->next)
1139 ;
1140 req->next = new_req;
1141 }
1142 else
1143 displaced->step_request_queue = new_req;
1144
1145 return 0;
1146 }
1147 else
1148 {
1149 if (debug_displaced)
1150 fprintf_unfiltered (gdb_stdlog,
1151 "displaced: stepping %s now\n",
1152 target_pid_to_str (ptid));
1153 }
1154
1155 displaced_step_clear (displaced);
1156
1157 old_cleanups = save_inferior_ptid ();
1158 inferior_ptid = ptid;
1159
1160 original = regcache_read_pc (regcache);
1161
1162 copy = gdbarch_displaced_step_location (gdbarch);
1163 len = gdbarch_max_insn_length (gdbarch);
1164
1165 /* Save the original contents of the copy area. */
1166 displaced->step_saved_copy = xmalloc (len);
1167 ignore_cleanups = make_cleanup (free_current_contents,
1168 &displaced->step_saved_copy);
1169 read_memory (copy, displaced->step_saved_copy, len);
1170 if (debug_displaced)
1171 {
1172 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1173 paddress (gdbarch, copy));
1174 displaced_step_dump_bytes (gdb_stdlog,
1175 displaced->step_saved_copy,
1176 len);
1177 }
1178
1179 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1180 original, copy, regcache);
1181
1182 /* We don't support the fully-simulated case at present. */
1183 gdb_assert (closure);
1184
1185 /* Save the information we need to fix things up if the step
1186 succeeds. */
1187 displaced->step_ptid = ptid;
1188 displaced->step_gdbarch = gdbarch;
1189 displaced->step_closure = closure;
1190 displaced->step_original = original;
1191 displaced->step_copy = copy;
1192
1193 make_cleanup (displaced_step_clear_cleanup, displaced);
1194
1195 /* Resume execution at the copy. */
1196 regcache_write_pc (regcache, copy);
1197
1198 discard_cleanups (ignore_cleanups);
1199
1200 do_cleanups (old_cleanups);
1201
1202 if (debug_displaced)
1203 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1204 paddress (gdbarch, copy));
1205
1206 return 1;
1207 }
1208
1209 static void
1210 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1211 {
1212 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1213 inferior_ptid = ptid;
1214 write_memory (memaddr, myaddr, len);
1215 do_cleanups (ptid_cleanup);
1216 }
1217
1218 static void
1219 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1220 {
1221 struct cleanup *old_cleanups;
1222 struct displaced_step_inferior_state *displaced
1223 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1224
1225 /* Was any thread of this process doing a displaced step? */
1226 if (displaced == NULL)
1227 return;
1228
1229 /* Was this event for the pid we displaced? */
1230 if (ptid_equal (displaced->step_ptid, null_ptid)
1231 || ! ptid_equal (displaced->step_ptid, event_ptid))
1232 return;
1233
1234 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1235
1236 /* Restore the contents of the copy area. */
1237 {
1238 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1239 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1240 displaced->step_saved_copy, len);
1241 if (debug_displaced)
1242 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1243 paddress (displaced->step_gdbarch,
1244 displaced->step_copy));
1245 }
1246
1247 /* Did the instruction complete successfully? */
1248 if (signal == TARGET_SIGNAL_TRAP)
1249 {
1250 /* Fix up the resulting state. */
1251 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1252 displaced->step_closure,
1253 displaced->step_original,
1254 displaced->step_copy,
1255 get_thread_regcache (displaced->step_ptid));
1256 }
1257 else
1258 {
1259 /* Since the instruction didn't complete, all we can do is
1260 relocate the PC. */
1261 struct regcache *regcache = get_thread_regcache (event_ptid);
1262 CORE_ADDR pc = regcache_read_pc (regcache);
1263 pc = displaced->step_original + (pc - displaced->step_copy);
1264 regcache_write_pc (regcache, pc);
1265 }
1266
1267 do_cleanups (old_cleanups);
1268
1269 displaced->step_ptid = null_ptid;
1270
1271 /* Are there any pending displaced stepping requests? If so, run
1272 one now. Leave the state object around, since we're likely to
1273 need it again soon. */
1274 while (displaced->step_request_queue)
1275 {
1276 struct displaced_step_request *head;
1277 ptid_t ptid;
1278 struct regcache *regcache;
1279 struct gdbarch *gdbarch;
1280 CORE_ADDR actual_pc;
1281 struct address_space *aspace;
1282
1283 head = displaced->step_request_queue;
1284 ptid = head->ptid;
1285 displaced->step_request_queue = head->next;
1286 xfree (head);
1287
1288 context_switch (ptid);
1289
1290 regcache = get_thread_regcache (ptid);
1291 actual_pc = regcache_read_pc (regcache);
1292 aspace = get_regcache_aspace (regcache);
1293
1294 if (breakpoint_here_p (aspace, actual_pc))
1295 {
1296 if (debug_displaced)
1297 fprintf_unfiltered (gdb_stdlog,
1298 "displaced: stepping queued %s now\n",
1299 target_pid_to_str (ptid));
1300
1301 displaced_step_prepare (ptid);
1302
1303 gdbarch = get_regcache_arch (regcache);
1304
1305 if (debug_displaced)
1306 {
1307 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1308 gdb_byte buf[4];
1309
1310 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1311 paddress (gdbarch, actual_pc));
1312 read_memory (actual_pc, buf, sizeof (buf));
1313 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1314 }
1315
1316 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1317 displaced->step_closure))
1318 target_resume (ptid, 1, TARGET_SIGNAL_0);
1319 else
1320 target_resume (ptid, 0, TARGET_SIGNAL_0);
1321
1322 /* Done, we're stepping a thread. */
1323 break;
1324 }
1325 else
1326 {
1327 int step;
1328 struct thread_info *tp = inferior_thread ();
1329
1330 /* The breakpoint we were sitting under has since been
1331 removed. */
1332 tp->trap_expected = 0;
1333
1334 /* Go back to what we were trying to do. */
1335 step = currently_stepping (tp);
1336
1337 if (debug_displaced)
1338 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1339 target_pid_to_str (tp->ptid), step);
1340
1341 target_resume (ptid, step, TARGET_SIGNAL_0);
1342 tp->stop_signal = TARGET_SIGNAL_0;
1343
1344 /* This request was discarded. See if there's any other
1345 thread waiting for its turn. */
1346 }
1347 }
1348 }
1349
1350 /* Update global variables holding ptids to hold NEW_PTID if they were
1351 holding OLD_PTID. */
1352 static void
1353 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1354 {
1355 struct displaced_step_request *it;
1356 struct displaced_step_inferior_state *displaced;
1357
1358 if (ptid_equal (inferior_ptid, old_ptid))
1359 inferior_ptid = new_ptid;
1360
1361 if (ptid_equal (singlestep_ptid, old_ptid))
1362 singlestep_ptid = new_ptid;
1363
1364 if (ptid_equal (deferred_step_ptid, old_ptid))
1365 deferred_step_ptid = new_ptid;
1366
1367 for (displaced = displaced_step_inferior_states;
1368 displaced;
1369 displaced = displaced->next)
1370 {
1371 if (ptid_equal (displaced->step_ptid, old_ptid))
1372 displaced->step_ptid = new_ptid;
1373
1374 for (it = displaced->step_request_queue; it; it = it->next)
1375 if (ptid_equal (it->ptid, old_ptid))
1376 it->ptid = new_ptid;
1377 }
1378 }
1379
1380 \f
1381 /* Resuming. */
1382
1383 /* Things to clean up if we QUIT out of resume (). */
1384 static void
1385 resume_cleanups (void *ignore)
1386 {
1387 normal_stop ();
1388 }
1389
1390 static const char schedlock_off[] = "off";
1391 static const char schedlock_on[] = "on";
1392 static const char schedlock_step[] = "step";
1393 static const char *scheduler_enums[] = {
1394 schedlock_off,
1395 schedlock_on,
1396 schedlock_step,
1397 NULL
1398 };
1399 static const char *scheduler_mode = schedlock_off;
1400 static void
1401 show_scheduler_mode (struct ui_file *file, int from_tty,
1402 struct cmd_list_element *c, const char *value)
1403 {
1404 fprintf_filtered (file, _("\
1405 Mode for locking scheduler during execution is \"%s\".\n"),
1406 value);
1407 }
1408
1409 static void
1410 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1411 {
1412 if (!target_can_lock_scheduler)
1413 {
1414 scheduler_mode = schedlock_off;
1415 error (_("Target '%s' cannot support this command."), target_shortname);
1416 }
1417 }
1418
1419 /* True if execution commands resume all threads of all processes by
1420 default; otherwise, resume only threads of the current inferior
1421 process. */
1422 int sched_multi = 0;
1423
1424 /* Try to setup for software single stepping over the specified location.
1425 Return 1 if target_resume() should use hardware single step.
1426
1427 GDBARCH the current gdbarch.
1428 PC the location to step over. */
1429
1430 static int
1431 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1432 {
1433 int hw_step = 1;
1434
1435 if (gdbarch_software_single_step_p (gdbarch)
1436 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1437 {
1438 hw_step = 0;
1439 /* Do not pull these breakpoints until after a `wait' in
1440 `wait_for_inferior' */
1441 singlestep_breakpoints_inserted_p = 1;
1442 singlestep_ptid = inferior_ptid;
1443 singlestep_pc = pc;
1444 }
1445 return hw_step;
1446 }
1447
1448 /* Resume the inferior, but allow a QUIT. This is useful if the user
1449 wants to interrupt some lengthy single-stepping operation
1450 (for child processes, the SIGINT goes to the inferior, and so
1451 we get a SIGINT random_signal, but for remote debugging and perhaps
1452 other targets, that's not true).
1453
1454 STEP nonzero if we should step (zero to continue instead).
1455 SIG is the signal to give the inferior (zero for none). */
1456 void
1457 resume (int step, enum target_signal sig)
1458 {
1459 int should_resume = 1;
1460 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1461 struct regcache *regcache = get_current_regcache ();
1462 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1463 struct thread_info *tp = inferior_thread ();
1464 CORE_ADDR pc = regcache_read_pc (regcache);
1465 struct address_space *aspace = get_regcache_aspace (regcache);
1466
1467 QUIT;
1468
1469 if (debug_infrun)
1470 fprintf_unfiltered (gdb_stdlog,
1471 "infrun: resume (step=%d, signal=%d), "
1472 "trap_expected=%d\n",
1473 step, sig, tp->trap_expected);
1474
1475 /* Normally, by the time we reach `resume', the breakpoints are either
1476 removed or inserted, as appropriate. The exception is if we're sitting
1477 at a permanent breakpoint; we need to step over it, but permanent
1478 breakpoints can't be removed. So we have to test for it here. */
1479 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1480 {
1481 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1482 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1483 else
1484 error (_("\
1485 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1486 how to step past a permanent breakpoint on this architecture. Try using\n\
1487 a command like `return' or `jump' to continue execution."));
1488 }
1489
1490 /* If enabled, step over breakpoints by executing a copy of the
1491 instruction at a different address.
1492
1493 We can't use displaced stepping when we have a signal to deliver;
1494 the comments for displaced_step_prepare explain why. The
1495 comments in handle_inferior_event for dealing with 'random
1496 signals' explain what we do instead. */
1497 if (use_displaced_stepping (gdbarch)
1498 && (tp->trap_expected
1499 || (step && gdbarch_software_single_step_p (gdbarch)))
1500 && sig == TARGET_SIGNAL_0)
1501 {
1502 struct displaced_step_inferior_state *displaced;
1503
1504 if (!displaced_step_prepare (inferior_ptid))
1505 {
1506 /* Got placed in displaced stepping queue. Will be resumed
1507 later when all the currently queued displaced stepping
1508 requests finish. The thread is not executing at this point,
1509 and the call to set_executing will be made later. But we
1510 need to call set_running here, since from the frontend's point of view,
1511 the thread is running. */
1512 set_running (inferior_ptid, 1);
1513 discard_cleanups (old_cleanups);
1514 return;
1515 }
1516
1517 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1518 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1519 displaced->step_closure);
1520 }
1521
1522 /* Do we need to do it the hard way, w/temp breakpoints? */
1523 else if (step)
1524 step = maybe_software_singlestep (gdbarch, pc);
1525
1526 if (should_resume)
1527 {
1528 ptid_t resume_ptid;
1529
1530 /* If STEP is set, it's a request to use hardware stepping
1531 facilities. But in that case, we should never
1532 use a singlestep breakpoint. */
1533 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1534
1535 /* Decide the set of threads to ask the target to resume. Start
1536 by assuming everything will be resumed, then narrow the set
1537 by applying increasingly restrictive conditions. */
1538
1539 /* By default, resume all threads of all processes. */
1540 resume_ptid = RESUME_ALL;
1541
1542 /* Maybe resume only all threads of the current process. */
1543 if (!sched_multi && target_supports_multi_process ())
1544 {
1545 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1546 }
1547
1548 /* Maybe resume a single thread after all. */
1549 if (singlestep_breakpoints_inserted_p
1550 && stepping_past_singlestep_breakpoint)
1551 {
1552 /* The situation here is as follows. In thread T1 we wanted to
1553 single-step. Lacking hardware single-stepping, we've
1554 set a breakpoint at the PC of the next instruction -- call it
1555 P. After resuming, we've hit that breakpoint in thread T2.
1556 Now we've removed the original breakpoint, inserted a breakpoint
1557 at P+1, and try to step to advance T2 past the breakpoint.
1558 We need to step only T2, as if T1 is allowed to freely run,
1559 it can run past P, and if other threads are allowed to run,
1560 they can hit the breakpoint at P+1, and nested hits of single-step
1561 breakpoints are not something we'd want -- that's complicated
1562 to support, and has no value. */
1563 resume_ptid = inferior_ptid;
1564 }
1565 else if ((step || singlestep_breakpoints_inserted_p)
1566 && tp->trap_expected)
1567 {
1568 /* We're allowing a thread to run past a breakpoint it has
1569 hit, by single-stepping the thread with the breakpoint
1570 removed. In which case, we need to single-step only this
1571 thread, and keep others stopped, as they can miss this
1572 breakpoint if allowed to run.
1573
1574 The current code actually removes all breakpoints when
1575 doing this, not just the one being stepped over, so if we
1576 let other threads run, we can actually miss any
1577 breakpoint, not just the one at PC. */
1578 resume_ptid = inferior_ptid;
1579 }
1580 else if (non_stop)
1581 {
1582 /* With non-stop mode on, threads are always handled
1583 individually. */
1584 resume_ptid = inferior_ptid;
1585 }
1586 else if ((scheduler_mode == schedlock_on)
1587 || (scheduler_mode == schedlock_step
1588 && (step || singlestep_breakpoints_inserted_p)))
1589 {
1590 /* User-settable 'scheduler' mode requires solo thread resume. */
1591 resume_ptid = inferior_ptid;
1592 }
1593
1594 if (gdbarch_cannot_step_breakpoint (gdbarch))
1595 {
1596 /* Most targets can step a breakpoint instruction, thus
1597 executing it normally. But if this one cannot, just
1598 continue and we will hit it anyway. */
1599 if (step && breakpoint_inserted_here_p (aspace, pc))
1600 step = 0;
1601 }
1602
1603 if (debug_displaced
1604 && use_displaced_stepping (gdbarch)
1605 && tp->trap_expected)
1606 {
1607 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1608 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1609 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1610 gdb_byte buf[4];
1611
1612 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1613 paddress (resume_gdbarch, actual_pc));
1614 read_memory (actual_pc, buf, sizeof (buf));
1615 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1616 }
1617
1618 /* Install inferior's terminal modes. */
1619 target_terminal_inferior ();
1620
1621 /* Avoid confusing the next resume, if the next stop/resume
1622 happens to apply to another thread. */
1623 tp->stop_signal = TARGET_SIGNAL_0;
1624
1625 target_resume (resume_ptid, step, sig);
1626 }
1627
1628 discard_cleanups (old_cleanups);
1629 }
1630 \f
1631 /* Proceeding. */
1632
1633 /* Clear out all variables saying what to do when inferior is continued.
1634 First do this, then set the ones you want, then call `proceed'. */
1635
1636 static void
1637 clear_proceed_status_thread (struct thread_info *tp)
1638 {
1639 if (debug_infrun)
1640 fprintf_unfiltered (gdb_stdlog,
1641 "infrun: clear_proceed_status_thread (%s)\n",
1642 target_pid_to_str (tp->ptid));
1643
1644 tp->trap_expected = 0;
1645 tp->step_range_start = 0;
1646 tp->step_range_end = 0;
1647 tp->step_frame_id = null_frame_id;
1648 tp->step_stack_frame_id = null_frame_id;
1649 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1650 tp->stop_requested = 0;
1651
1652 tp->stop_step = 0;
1653
1654 tp->proceed_to_finish = 0;
1655
1656 /* Discard any remaining commands or status from previous stop. */
1657 bpstat_clear (&tp->stop_bpstat);
1658 }
1659
1660 static int
1661 clear_proceed_status_callback (struct thread_info *tp, void *data)
1662 {
1663 if (is_exited (tp->ptid))
1664 return 0;
1665
1666 clear_proceed_status_thread (tp);
1667 return 0;
1668 }
1669
1670 void
1671 clear_proceed_status (void)
1672 {
1673 if (!non_stop)
1674 {
1675 /* In all-stop mode, delete the per-thread status of all
1676 threads. Even if inferior_ptid is null_ptid, there may be
1677 threads on the list. E.g., we may be launching a new
1678 process, while selecting the executable. */
1679 iterate_over_threads (clear_proceed_status_callback, NULL);
1680 }
1681
1682 if (!ptid_equal (inferior_ptid, null_ptid))
1683 {
1684 struct inferior *inferior;
1685
1686 if (non_stop)
1687 {
1688 /* If in non-stop mode, only delete the per-thread status of
1689 the current thread. */
1690 clear_proceed_status_thread (inferior_thread ());
1691 }
1692
1693 inferior = current_inferior ();
1694 inferior->stop_soon = NO_STOP_QUIETLY;
1695 }
1696
1697 stop_after_trap = 0;
1698
1699 observer_notify_about_to_proceed ();
1700
1701 if (stop_registers)
1702 {
1703 regcache_xfree (stop_registers);
1704 stop_registers = NULL;
1705 }
1706 }
1707
1708 /* Check the current thread against the thread that reported the most recent
1709 event. If a step-over is required, return TRUE and set the current thread
1710 to the old thread. Otherwise return FALSE.
1711
1712 This should be suitable for any targets that support threads. */
1713
1714 static int
1715 prepare_to_proceed (int step)
1716 {
1717 ptid_t wait_ptid;
1718 struct target_waitstatus wait_status;
1719 int schedlock_enabled;
1720
1721 /* With non-stop mode on, threads are always handled individually. */
1722 gdb_assert (! non_stop);
1723
1724 /* Get the last target status returned by target_wait(). */
1725 get_last_target_status (&wait_ptid, &wait_status);
1726
1727 /* Make sure we were stopped at a breakpoint. */
1728 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1729 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1730 && wait_status.value.sig != TARGET_SIGNAL_ILL
1731 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1732 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1733 {
1734 return 0;
1735 }
1736
1737 schedlock_enabled = (scheduler_mode == schedlock_on
1738 || (scheduler_mode == schedlock_step
1739 && step));
1740
1741 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1742 if (schedlock_enabled)
1743 return 0;
1744
1745 /* Don't switch over if we're about to resume some process
1746 other than WAIT_PTID's, and schedule-multiple is off. */
1747 if (!sched_multi
1748 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1749 return 0;
1750
1751 /* Switched over from WAIT_PTID. */
1752 if (!ptid_equal (wait_ptid, minus_one_ptid)
1753 && !ptid_equal (inferior_ptid, wait_ptid))
1754 {
1755 struct regcache *regcache = get_thread_regcache (wait_ptid);
1756
1757 if (breakpoint_here_p (get_regcache_aspace (regcache),
1758 regcache_read_pc (regcache)))
1759 {
1760 /* If stepping, remember current thread to switch back to. */
1761 if (step)
1762 deferred_step_ptid = inferior_ptid;
1763
1764 /* Switch back to the WAIT_PTID thread. */
1765 switch_to_thread (wait_ptid);
1766
1767 /* We return 1 to indicate that there is a breakpoint here,
1768 so we need to step over it before continuing to avoid
1769 hitting it straight away. */
1770 return 1;
1771 }
1772 }
1773
1774 return 0;
1775 }
1776
1777 /* Basic routine for continuing the program in various fashions.
1778
1779 ADDR is the address to resume at, or -1 for resume where stopped.
1780 SIGGNAL is the signal to give it, or 0 for none,
1781 or -1 to act according to how it stopped.
1782 STEP is nonzero if we should trap after one instruction.
1783 -1 means return after that and print nothing.
1784 You should probably set various step_... variables
1785 before calling here, if you are stepping.
1786
1787 You should call clear_proceed_status before calling proceed. */
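/* For example, a caller that wants to resume where the inferior
   stopped, without stepping, would follow the conventions above
   roughly like this (an illustrative sketch, not a call site taken
   from this file):

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   Passing TARGET_SIGNAL_DEFAULT leaves the thread's stop_signal
   alone, as handled by the SIGGNAL check in the body below.  */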
1788
1789 void
1790 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1791 {
1792 struct regcache *regcache;
1793 struct gdbarch *gdbarch;
1794 struct thread_info *tp;
1795 CORE_ADDR pc;
1796 struct address_space *aspace;
1797 int oneproc = 0;
1798
1799 /* If we're stopped at a fork/vfork, follow the branch set by the
1800 "set follow-fork-mode" command; otherwise, we'll just proceed
1801 resuming the current thread. */
1802 if (!follow_fork ())
1803 {
1804 /* The target for some reason decided not to resume. */
1805 normal_stop ();
1806 return;
1807 }
1808
1809 regcache = get_current_regcache ();
1810 gdbarch = get_regcache_arch (regcache);
1811 aspace = get_regcache_aspace (regcache);
1812 pc = regcache_read_pc (regcache);
1813
1814 if (step > 0)
1815 step_start_function = find_pc_function (pc);
1816 if (step < 0)
1817 stop_after_trap = 1;
1818
1819 if (addr == (CORE_ADDR) -1)
1820 {
1821 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1822 && execution_direction != EXEC_REVERSE)
1823 /* There is a breakpoint at the address we will resume at;
1824 step one instruction before inserting breakpoints so that
1825 we do not stop right away (and report a second hit at this
1826 breakpoint).
1827
1828 Note, we don't do this in reverse, because we won't
1829 actually be executing the breakpoint insn anyway.
1830 We'll be (un-)executing the previous instruction. */
1831
1832 oneproc = 1;
1833 else if (gdbarch_single_step_through_delay_p (gdbarch)
1834 && gdbarch_single_step_through_delay (gdbarch,
1835 get_current_frame ()))
1836 /* We stepped onto an instruction that needs to be stepped
1837 again before re-inserting the breakpoint; do so. */
1838 oneproc = 1;
1839 }
1840 else
1841 {
1842 regcache_write_pc (regcache, addr);
1843 }
1844
1845 if (debug_infrun)
1846 fprintf_unfiltered (gdb_stdlog,
1847 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1848 paddress (gdbarch, addr), siggnal, step);
1849
1850 /* We're handling a live event, so make sure we're doing live
1851 debugging. If we're looking at traceframes while the target is
1852 running, we're going to need to get back to that mode after
1853 handling the event. */
1854 if (non_stop)
1855 {
1856 make_cleanup_restore_current_traceframe ();
1857 set_traceframe_number (-1);
1858 }
1859
1860 if (non_stop)
1861 /* In non-stop, each thread is handled individually. The context
1862 must already be set to the right thread here. */
1863 ;
1864 else
1865 {
1866 /* In a multi-threaded task we may select another thread and
1867 then continue or step.
1868
1869 But if the old thread was stopped at a breakpoint, it will
1870 immediately cause another breakpoint stop without any
1871 execution (i.e. it will report a breakpoint hit incorrectly).
1872 So we must step over it first.
1873
1874 prepare_to_proceed checks the current thread against the
1875 thread that reported the most recent event. If a step-over
1876 is required it returns TRUE and sets the current thread to
1877 the old thread. */
1878 if (prepare_to_proceed (step))
1879 oneproc = 1;
1880 }
1881
1882 /* prepare_to_proceed may change the current thread. */
1883 tp = inferior_thread ();
1884
1885 if (oneproc)
1886 {
1887 tp->trap_expected = 1;
1888 /* If displaced stepping is enabled, we can step over the
1889 breakpoint without hitting it, so leave all breakpoints
1890 inserted. Otherwise we need to disable all breakpoints, step
1891 one instruction, and then re-add them when that step is
1892 finished. */
1893 if (!use_displaced_stepping (gdbarch))
1894 remove_breakpoints ();
1895 }
1896
1897 /* We can insert breakpoints if we're not trying to step over one,
1898 or if we are stepping over one but we're using displaced stepping
1899 to do so. */
1900 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1901 insert_breakpoints ();
1902
1903 if (!non_stop)
1904 {
1905 /* Pass the last stop signal to the thread we're resuming,
1906 irrespective of whether the current thread is the thread that
1907 got the last event or not. This was historically GDB's
1908 behaviour before keeping a stop_signal per thread. */
1909
1910 struct thread_info *last_thread;
1911 ptid_t last_ptid;
1912 struct target_waitstatus last_status;
1913
1914 get_last_target_status (&last_ptid, &last_status);
1915 if (!ptid_equal (inferior_ptid, last_ptid)
1916 && !ptid_equal (last_ptid, null_ptid)
1917 && !ptid_equal (last_ptid, minus_one_ptid))
1918 {
1919 last_thread = find_thread_ptid (last_ptid);
1920 if (last_thread)
1921 {
1922 tp->stop_signal = last_thread->stop_signal;
1923 last_thread->stop_signal = TARGET_SIGNAL_0;
1924 }
1925 }
1926 }
1927
1928 if (siggnal != TARGET_SIGNAL_DEFAULT)
1929 tp->stop_signal = siggnal;
1930 /* If this signal should not be seen by the program,
1931 give it zero. Used for debugging signals. */
1932 else if (!signal_program[tp->stop_signal])
1933 tp->stop_signal = TARGET_SIGNAL_0;
1934
1935 annotate_starting ();
1936
1937 /* Make sure that output from GDB appears before output from the
1938 inferior. */
1939 gdb_flush (gdb_stdout);
1940
1941 /* Refresh prev_pc value just prior to resuming. This used to be
1942 done in stop_stepping, however, setting prev_pc there did not handle
1943 scenarios such as inferior function calls or returning from
1944 a function via the return command. In those cases, the prev_pc
1945 value was not set properly for subsequent commands. The prev_pc value
1946 is used to initialize the starting line number in the ecs. With an
1947 invalid value, the gdb next command ends up stopping at the position
1948 represented by the next line table entry past our start position.
1949 On platforms that generate one line table entry per line, this
1950 is not a problem. However, on the ia64, the compiler generates
1951 extraneous line table entries that do not increase the line number.
1952 When we issue the gdb next command on the ia64 after an inferior call
1953 or a return command, we often end up a few instructions forward, still
1954 within the original line we started in.
1955
1956 An attempt was made to refresh the prev_pc at the same time the
1957 execution_control_state is initialized (for instance, just before
1958 waiting for an inferior event). But this approach did not work
1959 because of platforms that use ptrace, where the pc register cannot
1960 be read unless the inferior is stopped. At that point, we are not
1961 guaranteed the inferior is stopped and so the regcache_read_pc() call
1962 can fail. Setting the prev_pc value here ensures the value is updated
1963 correctly when the inferior is stopped. */
1964 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1965
1966 /* Fill in with reasonable starting values. */
1967 init_thread_stepping_state (tp);
1968
1969 /* Reset to normal state. */
1970 init_infwait_state ();
1971
1972 /* Resume inferior. */
1973 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1974
1975 /* Wait for it to stop (if not standalone)
1976 and in any case decode why it stopped, and act accordingly. */
1977 /* Do this only if we are not using the event loop, or if the target
1978 does not support asynchronous execution. */
1979 if (!target_can_async_p ())
1980 {
1981 wait_for_inferior (0);
1982 normal_stop ();
1983 }
1984 }
1985 \f
1986
1987 /* Start remote-debugging of a machine over a serial link. */
1988
1989 void
1990 start_remote (int from_tty)
1991 {
1992 struct inferior *inferior;
1993 init_wait_for_inferior ();
1994
1995 inferior = current_inferior ();
1996 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1997
1998 /* Always go on waiting for the target, regardless of the mode. */
1999 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2000 indicate to wait_for_inferior that a target should timeout if
2001 nothing is returned (instead of just blocking). Because of this,
2002 targets expecting an immediate response need to, internally, set
2003 things up so that the target_wait() is forced to eventually
2004 timeout. */
2005 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2006 differentiate to its caller what the state of the target is after
2007 the initial open has been performed. Here we're assuming that
2008 the target has stopped. It should be possible to eventually have
2009 target_open() return to the caller an indication that the target
2010 is currently running and GDB state should be set to the same as
2011 for an async run. */
2012 wait_for_inferior (0);
2013
2014 /* Now that the inferior has stopped, do any bookkeeping like
2015 loading shared libraries. We want to do this before normal_stop,
2016 so that the displayed frame is up to date. */
2017 post_create_inferior (&current_target, from_tty);
2018
2019 normal_stop ();
2020 }
2021
2022 /* Initialize static vars when a new inferior begins. */
2023
2024 void
2025 init_wait_for_inferior (void)
2026 {
2027 /* These are meaningless until the first time through wait_for_inferior. */
2028
2029 breakpoint_init_inferior (inf_starting);
2030
2031 clear_proceed_status ();
2032
2033 stepping_past_singlestep_breakpoint = 0;
2034 deferred_step_ptid = null_ptid;
2035
2036 target_last_wait_ptid = minus_one_ptid;
2037
2038 previous_inferior_ptid = null_ptid;
2039 init_infwait_state ();
2040
2041 /* Discard any skipped inlined frames. */
2042 clear_inline_frame_state (minus_one_ptid);
2043 }
2044
2045 \f
2046 /* This enum encodes possible reasons for doing a target_wait, so that
2047 wfi can call target_wait in one place. (Ultimately the call will be
2048 moved out of the infinite loop entirely.) */
2049
2050 enum infwait_states
2051 {
2052 infwait_normal_state,
2053 infwait_thread_hop_state,
2054 infwait_step_watch_state,
2055 infwait_nonstep_watch_state
2056 };
2057
2058 /* Why did the inferior stop? Used to print the appropriate messages
2059 to the interface from within handle_inferior_event(). */
2060 enum inferior_stop_reason
2061 {
2062 /* Step, next, nexti, stepi finished. */
2063 END_STEPPING_RANGE,
2064 /* Inferior terminated by signal. */
2065 SIGNAL_EXITED,
2066 /* Inferior exited. */
2067 EXITED,
2068 /* Inferior received signal, and user asked to be notified. */
2069 SIGNAL_RECEIVED,
2070 /* Reverse execution -- target ran out of history info. */
2071 NO_HISTORY
2072 };
2073
2074 /* The PTID we'll do a target_wait on. */
2075 ptid_t waiton_ptid;
2076
2077 /* Current inferior wait state. */
2078 enum infwait_states infwait_state;
2079
2080 /* Data to be passed around while handling an event. This data is
2081 discarded between events. */
2082 struct execution_control_state
2083 {
2084 ptid_t ptid;
2085 /* The thread that got the event, if this was a thread event; NULL
2086 otherwise. */
2087 struct thread_info *event_thread;
2088
2089 struct target_waitstatus ws;
2090 int random_signal;
2091 CORE_ADDR stop_func_start;
2092 CORE_ADDR stop_func_end;
2093 char *stop_func_name;
2094 int new_thread_event;
2095 int wait_some_more;
2096 };
2097
2098 static void handle_inferior_event (struct execution_control_state *ecs);
2099
2100 static void handle_step_into_function (struct gdbarch *gdbarch,
2101 struct execution_control_state *ecs);
2102 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2103 struct execution_control_state *ecs);
2104 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2106 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2107 struct symtab_and_line sr_sal,
2108 struct frame_id sr_id);
2109 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2110
2111 static void stop_stepping (struct execution_control_state *ecs);
2112 static void prepare_to_wait (struct execution_control_state *ecs);
2113 static void keep_going (struct execution_control_state *ecs);
2114 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2115 int stop_info);
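/* Illustrative sketch, compiled out, of the pattern this file uses to
   drive one round of event handling with the execution_control_state
   defined above (compare wait_for_inferior and fetch_inferior_event
   below); the function name is purely hypothetical.  */
#if 0
static void
example_handle_one_event (void)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  memset (ecs, 0, sizeof (*ecs));

  /* Invalidate registers before waiting, then ask the target for the
     next event.  */
  registers_changed ();
  ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);

  /* Decode the event; handle_inferior_event sets wait_some_more if
     the inferior should be resumed rather than reported stopped.  */
  handle_inferior_event (ecs);

  if (!ecs->wait_some_more)
    normal_stop ();
}
#endif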
2116
2117 /* Callback for iterate_over_threads. If the thread is stopped, but
2118 the user/frontend doesn't know about that yet, go through
2119 normal_stop, as if the thread had just stopped now. ARG points at
2120 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2121 ptid_is_pid(PTID) is true, applies to all threads of the process
2122 pointed at by PTID. Otherwise, applies only to the thread pointed
2123 at by PTID. */
2124
2125 static int
2126 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2127 {
2128 ptid_t ptid = * (ptid_t *) arg;
2129
2130 if ((ptid_equal (info->ptid, ptid)
2131 || ptid_equal (minus_one_ptid, ptid)
2132 || (ptid_is_pid (ptid)
2133 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2134 && is_running (info->ptid)
2135 && !is_executing (info->ptid))
2136 {
2137 struct cleanup *old_chain;
2138 struct execution_control_state ecss;
2139 struct execution_control_state *ecs = &ecss;
2140
2141 memset (ecs, 0, sizeof (*ecs));
2142
2143 old_chain = make_cleanup_restore_current_thread ();
2144
2145 switch_to_thread (info->ptid);
2146
2147 /* Go through handle_inferior_event/normal_stop, so we always
2148 have consistent output as if the stop event had been
2149 reported. */
2150 ecs->ptid = info->ptid;
2151 ecs->event_thread = find_thread_ptid (info->ptid);
2152 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2153 ecs->ws.value.sig = TARGET_SIGNAL_0;
2154
2155 handle_inferior_event (ecs);
2156
2157 if (!ecs->wait_some_more)
2158 {
2159 struct thread_info *tp;
2160
2161 normal_stop ();
2162
2163 /* Finish off the continuations. The continuations
2164 themselves are responsible for realising the thread
2165 didn't finish what it was supposed to do. */
2166 tp = inferior_thread ();
2167 do_all_intermediate_continuations_thread (tp);
2168 do_all_continuations_thread (tp);
2169 }
2170
2171 do_cleanups (old_chain);
2172 }
2173
2174 return 0;
2175 }
2176
2177 /* This function is attached as a "thread_stop_requested" observer.
2178 Cleanup local state that assumed the PTID was to be resumed, and
2179 report the stop to the frontend. */
2180
2181 static void
2182 infrun_thread_stop_requested (ptid_t ptid)
2183 {
2184 struct displaced_step_inferior_state *displaced;
2185
2186 /* PTID was requested to stop. Remove it from the displaced
2187 stepping queue, so we don't try to resume it automatically. */
2188
2189 for (displaced = displaced_step_inferior_states;
2190 displaced;
2191 displaced = displaced->next)
2192 {
2193 struct displaced_step_request *it, **prev_next_p;
2194
2195 it = displaced->step_request_queue;
2196 prev_next_p = &displaced->step_request_queue;
2197 while (it)
2198 {
2199 if (ptid_match (it->ptid, ptid))
2200 {
2201 *prev_next_p = it->next;
2202 it->next = NULL;
2203 xfree (it);
2204 }
2205 else
2206 {
2207 prev_next_p = &it->next;
2208 }
2209
2210 it = *prev_next_p;
2211 }
2212 }
2213
2214 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2215 }
2216
2217 static void
2218 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2219 {
2220 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2221 nullify_last_target_wait_ptid ();
2222 }
2223
2224 /* Callback for iterate_over_threads. */
2225
2226 static int
2227 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2228 {
2229 if (is_exited (info->ptid))
2230 return 0;
2231
2232 delete_step_resume_breakpoint (info);
2233 return 0;
2234 }
2235
2236 /* In all-stop, delete the step resume breakpoint of any thread that
2237 had one. In non-stop, delete the step resume breakpoint of the
2238 thread that just stopped. */
2239
2240 static void
2241 delete_step_thread_step_resume_breakpoint (void)
2242 {
2243 if (!target_has_execution
2244 || ptid_equal (inferior_ptid, null_ptid))
2245 /* If the inferior has exited, we have already deleted the step
2246 resume breakpoints out of GDB's lists. */
2247 return;
2248
2249 if (non_stop)
2250 {
2251 /* If in non-stop mode, only delete the step-resume or
2252 longjmp-resume breakpoint of the thread that just stopped
2253 stepping. */
2254 struct thread_info *tp = inferior_thread ();
2255 delete_step_resume_breakpoint (tp);
2256 }
2257 else
2258 /* In all-stop mode, delete all step-resume and longjmp-resume
2259 breakpoints of any thread that had them. */
2260 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2261 }
2262
2263 /* A cleanup wrapper. */
2264
2265 static void
2266 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2267 {
2268 delete_step_thread_step_resume_breakpoint ();
2269 }
2270
2271 /* Pretty print the results of target_wait, for debugging purposes. */
2272
2273 static void
2274 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2275 const struct target_waitstatus *ws)
2276 {
2277 char *status_string = target_waitstatus_to_string (ws);
2278 struct ui_file *tmp_stream = mem_fileopen ();
2279 char *text;
2280
2281 /* The text is split over several lines because it was getting too long.
2282 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2283 output as a unit; we want only one timestamp printed if debug_timestamp
2284 is set. */
2285
2286 fprintf_unfiltered (tmp_stream,
2287 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2288 if (PIDGET (waiton_ptid) != -1)
2289 fprintf_unfiltered (tmp_stream,
2290 " [%s]", target_pid_to_str (waiton_ptid));
2291 fprintf_unfiltered (tmp_stream, ", status) =\n");
2292 fprintf_unfiltered (tmp_stream,
2293 "infrun: %d [%s],\n",
2294 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2295 fprintf_unfiltered (tmp_stream,
2296 "infrun: %s\n",
2297 status_string);
2298
2299 text = ui_file_xstrdup (tmp_stream, NULL);
2300
2301 /* This uses %s in part to handle %'s in the text, but also to avoid
2302 a gcc error: the format attribute requires a string literal. */
2303 fprintf_unfiltered (gdb_stdlog, "%s", text);
2304
2305 xfree (status_string);
2306 xfree (text);
2307 ui_file_delete (tmp_stream);
2308 }
2309
2310 /* Prepare and stabilize the inferior for detaching it. E.g.,
2311 detaching while a thread is displaced stepping is a recipe for
2312 crashing it, as nothing would readjust the PC out of the scratch
2313 pad. */
2314
2315 void
2316 prepare_for_detach (void)
2317 {
2318 struct inferior *inf = current_inferior ();
2319 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2320 struct cleanup *old_chain_1;
2321 struct displaced_step_inferior_state *displaced;
2322
2323 displaced = get_displaced_stepping_state (inf->pid);
2324
2325 /* Is any thread of this process displaced stepping? If not,
2326 there's nothing else to do. */
2327 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2328 return;
2329
2330 if (debug_infrun)
2331 fprintf_unfiltered (gdb_stdlog,
2332 "displaced-stepping in-process while detaching");
2333
2334 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2335 inf->detaching = 1;
2336
2337 while (!ptid_equal (displaced->step_ptid, null_ptid))
2338 {
2339 struct cleanup *old_chain_2;
2340 struct execution_control_state ecss;
2341 struct execution_control_state *ecs;
2342
2343 ecs = &ecss;
2344 memset (ecs, 0, sizeof (*ecs));
2345
2346 overlay_cache_invalid = 1;
2347
2348 /* We have to invalidate the registers BEFORE calling
2349 target_wait because they can be loaded from the target while
2350 in target_wait. This makes remote debugging a bit more
2351 efficient for those targets that provide critical registers
2352 as part of their normal status mechanism. */
2353
2354 registers_changed ();
2355
2356 if (deprecated_target_wait_hook)
2357 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2358 else
2359 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2360
2361 if (debug_infrun)
2362 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2363
2364 /* If an error happens while handling the event, propagate GDB's
2365 knowledge of the executing state to the frontend/user running
2366 state. */
2367 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2368
2369 /* In non-stop mode, each thread is handled individually.
2370 Switch early, so the global state is set correctly for this
2371 thread. */
2372 if (non_stop
2373 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2374 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2375 context_switch (ecs->ptid);
2376
2377 /* Now figure out what to do with the result. */
2378 handle_inferior_event (ecs);
2379
2380 /* No error, don't finish the state yet. */
2381 discard_cleanups (old_chain_2);
2382
2383 /* Breakpoints and watchpoints are not installed on the target
2384 at this point, and signals are passed directly to the
2385 inferior, so this must mean the process is gone. */
2386 if (!ecs->wait_some_more)
2387 {
2388 discard_cleanups (old_chain_1);
2389 error (_("Program exited while detaching"));
2390 }
2391 }
2392
2393 discard_cleanups (old_chain_1);
2394 }
2395
2396 /* Wait for control to return from inferior to debugger.
2397
2398 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2399 as if they were SIGTRAP signals. This can be useful during
2400 the startup sequence on some targets such as HP/UX, where
2401 we receive an EXEC event instead of the expected SIGTRAP.
2402
2403 If inferior gets a signal, we may decide to start it up again
2404 instead of returning. That is why there is a loop in this function.
2405 When this function actually returns it means the inferior
2406 should be left stopped and GDB should read more commands. */
2407
2408 void
2409 wait_for_inferior (int treat_exec_as_sigtrap)
2410 {
2411 struct cleanup *old_cleanups;
2412 struct execution_control_state ecss;
2413 struct execution_control_state *ecs;
2414
2415 if (debug_infrun)
2416 fprintf_unfiltered
2417 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2418 treat_exec_as_sigtrap);
2419
2420 old_cleanups =
2421 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2422
2423 ecs = &ecss;
2424 memset (ecs, 0, sizeof (*ecs));
2425
2426 /* We'll update this if & when we switch to a new thread. */
2427 previous_inferior_ptid = inferior_ptid;
2428
2429 while (1)
2430 {
2431 struct cleanup *old_chain;
2432
2433 /* We have to invalidate the registers BEFORE calling target_wait
2434 because they can be loaded from the target while in target_wait.
2435 This makes remote debugging a bit more efficient for those
2436 targets that provide critical registers as part of their normal
2437 status mechanism. */
2438
2439 overlay_cache_invalid = 1;
2440 registers_changed ();
2441
2442 if (deprecated_target_wait_hook)
2443 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2444 else
2445 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2446
2447 if (debug_infrun)
2448 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2449
2450 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2451 {
2452 xfree (ecs->ws.value.execd_pathname);
2453 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2454 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2455 }
2456
2457 /* If an error happens while handling the event, propagate GDB's
2458 knowledge of the executing state to the frontend/user running
2459 state. */
2460 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2461
2462 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2463 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2464 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2465
2466 /* Now figure out what to do with the result. */
2467 handle_inferior_event (ecs);
2468
2469 /* No error, don't finish the state yet. */
2470 discard_cleanups (old_chain);
2471
2472 if (!ecs->wait_some_more)
2473 break;
2474 }
2475
2476 do_cleanups (old_cleanups);
2477 }
2478
2479 /* Asynchronous version of wait_for_inferior. It is called by the
2480 event loop whenever a change of state is detected on the file
2481 descriptor corresponding to the target. It can be called more than
2482 once to complete a single execution command. In such cases we need
2483 to keep the state in a global variable ECSS. If it is the last time
2484 that this function is called for a single execution command, then
2485 report to the user that the inferior has stopped, and do the
2486 necessary cleanups. */
2487
2488 void
2489 fetch_inferior_event (void *client_data)
2490 {
2491 struct execution_control_state ecss;
2492 struct execution_control_state *ecs = &ecss;
2493 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2494 struct cleanup *ts_old_chain;
2495 int was_sync = sync_execution;
2496
2497 memset (ecs, 0, sizeof (*ecs));
2498
2499 /* We'll update this if & when we switch to a new thread. */
2500 previous_inferior_ptid = inferior_ptid;
2501
2502 if (non_stop)
2503 /* In non-stop mode, the user/frontend should not notice a thread
2504 switch due to internal events. Make sure we revert to the
2505 user-selected thread and frame after handling the event and
2506 running any breakpoint commands. */
2507 make_cleanup_restore_current_thread ();
2508
2509 /* We have to invalidate the registers BEFORE calling target_wait
2510 because they can be loaded from the target while in target_wait.
2511 This makes remote debugging a bit more efficient for those
2512 targets that provide critical registers as part of their normal
2513 status mechanism. */
2514
2515 overlay_cache_invalid = 1;
2516 registers_changed ();
2517
2518 if (deprecated_target_wait_hook)
2519 ecs->ptid =
2520 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2521 else
2522 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2523
2524 if (debug_infrun)
2525 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2526
2527 if (non_stop
2528 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2529 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2530 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2531 /* In non-stop mode, each thread is handled individually. Switch
2532 early, so the global state is set correctly for this
2533 thread. */
2534 context_switch (ecs->ptid);
2535
2536 /* If an error happens while handling the event, propagate GDB's
2537 knowledge of the executing state to the frontend/user running
2538 state. */
2539 if (!non_stop)
2540 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2541 else
2542 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2543
2544 /* Now figure out what to do with the result. */
2545 handle_inferior_event (ecs);
2546
2547 if (!ecs->wait_some_more)
2548 {
2549 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2550
2551 delete_step_thread_step_resume_breakpoint ();
2552
2553 /* We may not find an inferior if this was a process exit. */
2554 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2555 normal_stop ();
2556
2557 if (target_has_execution
2558 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2559 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2560 && ecs->event_thread->step_multi
2561 && ecs->event_thread->stop_step)
2562 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2563 else
2564 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2565 }
2566
2567 /* No error, don't finish the thread states yet. */
2568 discard_cleanups (ts_old_chain);
2569
2570 /* Revert thread and frame. */
2571 do_cleanups (old_chain);
2572
2573 /* If the inferior was in sync execution mode, and now isn't,
2574 restore the prompt. */
2575 if (was_sync && !sync_execution)
2576 display_gdb_prompt (0);
2577 }
2578
2579 /* Record the frame and location we're currently stepping through. */
2580 void
2581 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2582 {
2583 struct thread_info *tp = inferior_thread ();
2584
2585 tp->step_frame_id = get_frame_id (frame);
2586 tp->step_stack_frame_id = get_stack_frame_id (frame);
2587
2588 tp->current_symtab = sal.symtab;
2589 tp->current_line = sal.line;
2590 }
2591
2592 /* Clear context switchable stepping state. */
2593
2594 void
2595 init_thread_stepping_state (struct thread_info *tss)
2596 {
2597 tss->stepping_over_breakpoint = 0;
2598 tss->step_after_step_resume_breakpoint = 0;
2599 tss->stepping_through_solib_after_catch = 0;
2600 tss->stepping_through_solib_catchpoints = NULL;
2601 }
2602
2603 /* Return the cached copy of the last pid/waitstatus returned by
2604 target_wait()/deprecated_target_wait_hook(). The data is actually
2605 cached by handle_inferior_event(), which gets called immediately
2606 after target_wait()/deprecated_target_wait_hook(). */
2607
2608 void
2609 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2610 {
2611 *ptidp = target_last_wait_ptid;
2612 *status = target_last_waitstatus;
2613 }
2614
2615 void
2616 nullify_last_target_wait_ptid (void)
2617 {
2618 target_last_wait_ptid = minus_one_ptid;
2619 }
2620
2621 /* Switch thread contexts. */
2622
2623 static void
2624 context_switch (ptid_t ptid)
2625 {
2626 if (debug_infrun)
2627 {
2628 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2629 target_pid_to_str (inferior_ptid));
2630 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2631 target_pid_to_str (ptid));
2632 }
2633
2634 switch_to_thread (ptid);
2635 }
2636
2637 static void
2638 adjust_pc_after_break (struct execution_control_state *ecs)
2639 {
2640 struct regcache *regcache;
2641 struct gdbarch *gdbarch;
2642 struct address_space *aspace;
2643 CORE_ADDR breakpoint_pc;
2644
2645 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2646 we aren't, just return.
2647
2648 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2649 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2650 implemented by software breakpoints should be handled through the normal
2651 breakpoint layer.
2652
2653 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2654 different signals (SIGILL or SIGEMT for instance), but it is less
2655 clear where the PC is pointing afterwards. It may not match
2656 gdbarch_decr_pc_after_break. I don't know any specific target that
2657 generates these signals at breakpoints (the code has been in GDB since at
2658 least 1992) so I can not guess how to handle them here.
2659
2660 In earlier versions of GDB, a target with
2661 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2662 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2663 target with both of these set in GDB history, and it seems unlikely to be
2664 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2665
2666 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2667 return;
2668
2669 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2670 return;
2671
2672 /* In reverse execution, when a breakpoint is hit, the instruction
2673 under it has already been de-executed. The reported PC always
2674 points at the breakpoint address, so adjusting it further would
2675 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2676 architecture:
2677
2678 B1 0x08000000 : INSN1
2679 B2 0x08000001 : INSN2
2680 0x08000002 : INSN3
2681 PC -> 0x08000003 : INSN4
2682
2683 Say you're stopped at 0x08000003 as above. Reverse continuing
2684 from that point should hit B2 as below. Reading the PC when the
2685 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2686 been de-executed already.
2687
2688 B1 0x08000000 : INSN1
2689 B2 PC -> 0x08000001 : INSN2
2690 0x08000002 : INSN3
2691 0x08000003 : INSN4
2692
2693 We can't apply the same logic as for forward execution, because
2694 we would wrongly adjust the PC to 0x08000000, since there's a
2695 breakpoint at PC - 1. We'd then report a hit on B1, although
2696 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2697 behaviour. */
2698 if (execution_direction == EXEC_REVERSE)
2699 return;
2700
2701 /* If this target does not decrement the PC after breakpoints, then
2702 we have nothing to do. */
2703 regcache = get_thread_regcache (ecs->ptid);
2704 gdbarch = get_regcache_arch (regcache);
2705 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2706 return;
2707
2708 aspace = get_regcache_aspace (regcache);
2709
2710 /* Find the location where (if we've hit a breakpoint) the
2711 breakpoint would be. */
2712 breakpoint_pc = regcache_read_pc (regcache)
2713 - gdbarch_decr_pc_after_break (gdbarch);
2714
2715 /* Check whether there actually is a software breakpoint inserted at
2716 that location.
2717
2718 If in non-stop mode, a race condition is possible where we've
2719 removed a breakpoint, but stop events for that breakpoint were
2720 already queued and arrive later. To suppress those spurious
2721 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2722 and retire them after a number of stop events are reported. */
2723 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2724 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2725 {
2726 struct cleanup *old_cleanups = NULL;
2727 if (RECORD_IS_USED)
2728 old_cleanups = record_gdb_operation_disable_set ();
2729
2730 /* When using hardware single-step, a SIGTRAP is reported for both
2731 a completed single-step and a software breakpoint. We need to
2732 differentiate between the two, as the latter needs adjusting
2733 but the former does not.
2734
2735 The SIGTRAP can be due to a completed hardware single-step only if
2736 - we didn't insert software single-step breakpoints
2737 - the thread to be examined is still the current thread
2738 - this thread is currently being stepped
2739
2740 If any of these conditions does not hold, we must have stopped due
2741 to hitting a software breakpoint, and have to back up to the
2742 breakpoint address.
2743
2744 As a special case, we could have hardware single-stepped a
2745 software breakpoint. In this case (prev_pc == breakpoint_pc),
2746 we also need to back up to the breakpoint address. */
2747
2748 if (singlestep_breakpoints_inserted_p
2749 || !ptid_equal (ecs->ptid, inferior_ptid)
2750 || !currently_stepping (ecs->event_thread)
2751 || ecs->event_thread->prev_pc == breakpoint_pc)
2752 regcache_write_pc (regcache, breakpoint_pc);
2753
2754 if (RECORD_IS_USED)
2755 do_cleanups (old_cleanups);
2756 }
2757 }
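/* Worked example for the forward-execution case above: with
   gdbarch_decr_pc_after_break == 1 (typical of x86-style breakpoint
   instructions) and a software breakpoint inserted at 0x08000000, the
   target reports a SIGTRAP with PC == 0x08000001; breakpoint_pc is
   computed as 0x08000001 - 1 == 0x08000000, a breakpoint is indeed
   inserted there, so the PC is rewound to 0x08000000 and the hit is
   attributed to the right address.  */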
2758
2759 void
2760 init_infwait_state (void)
2761 {
2762 waiton_ptid = pid_to_ptid (-1);
2763 infwait_state = infwait_normal_state;
2764 }
2765
2766 void
2767 error_is_running (void)
2768 {
2769 error (_("\
2770 Cannot execute this command while the selected thread is running."));
2771 }
2772
2773 void
2774 ensure_not_running (void)
2775 {
2776 if (is_running (inferior_ptid))
2777 error_is_running ();
2778 }
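/* For instance (a hypothetical caller, not taken from this file), a
   command that must not operate on a running thread would begin with:

     ensure_not_running ();

   which raises the error above if the selected thread is running.  */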
2779
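/* Return non-zero if FRAME was called, directly or through a chain of
   inlined frames only, from the frame identified by STEP_FRAME_ID;
   that is, stepping into FRAME started from that frame.  */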
2780 static int
2781 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2782 {
2783 for (frame = get_prev_frame (frame);
2784 frame != NULL;
2785 frame = get_prev_frame (frame))
2786 {
2787 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2788 return 1;
2789 if (get_frame_type (frame) != INLINE_FRAME)
2790 break;
2791 }
2792
2793 return 0;
2794 }
2795
2796 /* Auxiliary function that handles syscall entry/return events.
2797 It returns 1 if the inferior should keep going (and GDB
2798 should ignore the event), or 0 if the event deserves to be
2799 processed. */
2800
2801 static int
2802 handle_syscall_event (struct execution_control_state *ecs)
2803 {
2804 struct regcache *regcache;
2805 struct gdbarch *gdbarch;
2806 int syscall_number;
2807
2808 if (!ptid_equal (ecs->ptid, inferior_ptid))
2809 context_switch (ecs->ptid);
2810
2811 regcache = get_thread_regcache (ecs->ptid);
2812 gdbarch = get_regcache_arch (regcache);
2813 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2814 stop_pc = regcache_read_pc (regcache);
2815
2816 target_last_waitstatus.value.syscall_number = syscall_number;
2817
2818 if (catch_syscall_enabled () > 0
2819 && catching_syscall_number (syscall_number) > 0)
2820 {
2821 if (debug_infrun)
2822 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2823 syscall_number);
2824
2825 ecs->event_thread->stop_bpstat
2826 = bpstat_stop_status (get_regcache_aspace (regcache),
2827 stop_pc, ecs->ptid);
2828 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2829
2830 if (!ecs->random_signal)
2831 {
2832 /* Catchpoint hit. */
2833 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2834 return 0;
2835 }
2836 }
2837
2838 /* If no catchpoint triggered for this, then keep going. */
2839 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2840 keep_going (ecs);
2841 return 1;
2842 }
2843
2844 /* Given an execution control state that has been freshly filled in
2845 by an event from the inferior, figure out what it means and take
2846 appropriate action. */
2847
2848 static void
2849 handle_inferior_event (struct execution_control_state *ecs)
2850 {
2851 struct frame_info *frame;
2852 struct gdbarch *gdbarch;
2853 int sw_single_step_trap_p = 0;
2854 int stopped_by_watchpoint;
2855 int stepped_after_stopped_by_watchpoint = 0;
2856 struct symtab_and_line stop_pc_sal;
2857 enum stop_kind stop_soon;
2858
2859 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2860 {
2861 /* We had an event in the inferior, but we are not interested in
2862 handling it at this level. The lower layers have already
2863 done what needs to be done, if anything.
2864
2865 One of the possible circumstances for this is when the
2866 inferior produces output for the console. The inferior has
2867 not stopped, and we are ignoring the event. Another possible
2868 circumstance is any event which the lower level knows will be
2869 reported multiple times without an intervening resume. */
2870 if (debug_infrun)
2871 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2872 prepare_to_wait (ecs);
2873 return;
2874 }
2875
2876 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2877 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2878 {
2879 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2880 gdb_assert (inf);
2881 stop_soon = inf->stop_soon;
2882 }
2883 else
2884 stop_soon = NO_STOP_QUIETLY;
2885
2886 /* Cache the last pid/waitstatus. */
2887 target_last_wait_ptid = ecs->ptid;
2888 target_last_waitstatus = ecs->ws;
2889
2890 /* Always clear state belonging to the previous time we stopped. */
2891 stop_stack_dummy = STOP_NONE;
2892
2893 /* If it's a new process, add it to the thread database. */
2894
2895 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2896 && !ptid_equal (ecs->ptid, minus_one_ptid)
2897 && !in_thread_list (ecs->ptid));
2898
2899 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2900 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2901 add_thread (ecs->ptid);
2902
2903 ecs->event_thread = find_thread_ptid (ecs->ptid);
2904
2905 /* Dependent on valid ECS->EVENT_THREAD. */
2906 adjust_pc_after_break (ecs);
2907
2908 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2909 reinit_frame_cache ();
2910
2911 breakpoint_retire_moribund ();
2912
2913 /* First, distinguish signals caused by the debugger from signals
2914 that have to do with the program's own actions. Note that
2915 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2916 on the operating system version. Here we detect when a SIGILL or
2917 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2918 something similar for SIGSEGV, since a SIGSEGV will be generated
2919 when we're trying to execute a breakpoint instruction on a
2920 non-executable stack. This happens for call dummy breakpoints
2921 for architectures like SPARC that place call dummies on the
2922 stack. */
2923 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2924 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2925 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2926 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2927 {
2928 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2929
2930 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2931 regcache_read_pc (regcache)))
2932 {
2933 if (debug_infrun)
2934 fprintf_unfiltered (gdb_stdlog,
2935 "infrun: Treating signal as SIGTRAP\n");
2936 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2937 }
2938 }
2939
2940 /* Mark the non-executing threads accordingly. In all-stop, all
2941 threads of all processes are stopped when we get any event
2942 reported. In non-stop mode, only the event thread stops. If
2943 we're handling a process exit in non-stop mode, there's nothing
2944 to do, as threads of the dead process are gone, and threads of
2945 any other process were left running. */
2946 if (!non_stop)
2947 set_executing (minus_one_ptid, 0);
2948 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2949 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2950 set_executing (inferior_ptid, 0);
2951
2952 switch (infwait_state)
2953 {
2954 case infwait_thread_hop_state:
2955 if (debug_infrun)
2956 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2957 break;
2958
2959 case infwait_normal_state:
2960 if (debug_infrun)
2961 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2962 break;
2963
2964 case infwait_step_watch_state:
2965 if (debug_infrun)
2966 fprintf_unfiltered (gdb_stdlog,
2967 "infrun: infwait_step_watch_state\n");
2968
2969 stepped_after_stopped_by_watchpoint = 1;
2970 break;
2971
2972 case infwait_nonstep_watch_state:
2973 if (debug_infrun)
2974 fprintf_unfiltered (gdb_stdlog,
2975 "infrun: infwait_nonstep_watch_state\n");
2976 insert_breakpoints ();
2977
2978 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2979 handle things like signals arriving and other things happening
2980 in combination correctly? */
2981 stepped_after_stopped_by_watchpoint = 1;
2982 break;
2983
2984 default:
2985 internal_error (__FILE__, __LINE__, _("bad switch"));
2986 }
2987
2988 infwait_state = infwait_normal_state;
2989 waiton_ptid = pid_to_ptid (-1);
2990
2991 switch (ecs->ws.kind)
2992 {
2993 case TARGET_WAITKIND_LOADED:
2994 if (debug_infrun)
2995 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2996 /* Ignore gracefully during startup of the inferior, as it might
2997 be the shell which has just loaded some objects; otherwise
2998 add the symbols for the newly loaded objects. Also ignore at
2999 the beginning of an attach or remote session; we will query
3000 the full list of libraries once the connection is
3001 established. */
3002 if (stop_soon == NO_STOP_QUIETLY)
3003 {
3004 /* Check for any newly added shared libraries if we're
3005 supposed to be adding them automatically. Switch
3006 terminal for any messages produced by
3007 breakpoint_re_set. */
3008 target_terminal_ours_for_output ();
3009 /* NOTE: cagney/2003-11-25: Make certain that the target
3010 stack's section table is kept up-to-date. Architectures,
3011 (e.g., PPC64), use the section table to perform
3012 operations such as address => section name and hence
3013 require the table to contain all sections (including
3014 those found in shared libraries). */
3015 #ifdef SOLIB_ADD
3016 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3017 #else
3018 solib_add (NULL, 0, &current_target, auto_solib_add);
3019 #endif
3020 target_terminal_inferior ();
3021
3022 /* If requested, stop when the dynamic linker notifies
3023 gdb of events. This allows the user to get control
3024 and place breakpoints in initializer routines for
3025 dynamically loaded objects (among other things). */
3026 if (stop_on_solib_events)
3027 {
3028 /* Make sure we print "Stopped due to solib-event" in
3029 normal_stop. */
3030 stop_print_frame = 1;
3031
3032 stop_stepping (ecs);
3033 return;
3034 }
3035
3036 /* NOTE drow/2007-05-11: This might be a good place to check
3037 for "catch load". */
3038 }
3039
3040 /* If we are skipping through a shell, or through shared library
3041 loading that we aren't interested in, resume the program. If
3042 we're running the program normally, also resume. But stop if
3043 we're attaching or setting up a remote connection. */
3044 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3045 {
3046 /* Loading of shared libraries might have changed breakpoint
3047 addresses. Make sure new breakpoints are inserted. */
3048 if (stop_soon == NO_STOP_QUIETLY
3049 && !breakpoints_always_inserted_mode ())
3050 insert_breakpoints ();
3051 resume (0, TARGET_SIGNAL_0);
3052 prepare_to_wait (ecs);
3053 return;
3054 }
3055
3056 break;
3057
3058 case TARGET_WAITKIND_SPURIOUS:
3059 if (debug_infrun)
3060 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3061 resume (0, TARGET_SIGNAL_0);
3062 prepare_to_wait (ecs);
3063 return;
3064
3065 case TARGET_WAITKIND_EXITED:
3066 if (debug_infrun)
3067 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3068 inferior_ptid = ecs->ptid;
3069 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3070 set_current_program_space (current_inferior ()->pspace);
3071 handle_vfork_child_exec_or_exit (0);
3072 target_terminal_ours (); /* Must do this before mourn anyway */
3073 print_stop_reason (EXITED, ecs->ws.value.integer);
3074
3075 /* Record the exit code in the convenience variable $_exitcode, so
3076 that the user can inspect this again later. */
3077 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3078 (LONGEST) ecs->ws.value.integer);
3079 gdb_flush (gdb_stdout);
3080 target_mourn_inferior ();
3081 singlestep_breakpoints_inserted_p = 0;
3082 stop_print_frame = 0;
3083 stop_stepping (ecs);
3084 return;
3085
3086 case TARGET_WAITKIND_SIGNALLED:
3087 if (debug_infrun)
3088 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3089 inferior_ptid = ecs->ptid;
3090 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3091 set_current_program_space (current_inferior ()->pspace);
3092 handle_vfork_child_exec_or_exit (0);
3093 stop_print_frame = 0;
3094 target_terminal_ours (); /* Must do this before mourn anyway */
3095
3096 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3097 reach here unless the inferior is dead. However, for years
3098 target_kill() was called here, which hints that fatal signals aren't
3099 really fatal on some systems. If that's true, then some changes
3100 may be needed. */
3101 target_mourn_inferior ();
3102
3103 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3104 singlestep_breakpoints_inserted_p = 0;
3105 stop_stepping (ecs);
3106 return;
3107
3108 /* The following are the only cases in which we keep going;
3109 the above cases end in a return or break. */
3110 case TARGET_WAITKIND_FORKED:
3111 case TARGET_WAITKIND_VFORKED:
3112 if (debug_infrun)
3113 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3114
3115 if (!ptid_equal (ecs->ptid, inferior_ptid))
3116 {
3117 context_switch (ecs->ptid);
3118 reinit_frame_cache ();
3119 }
3120
3121 /* Immediately detach breakpoints from the child before there's
3122 any chance of letting the user delete breakpoints from the
3123 breakpoint lists. If we don't do this early, it's easy to
3124 leave left-over traps in the child, viz.: "break foo; catch
3125 fork; c; <fork>; del; c; <child calls foo>". We only follow
3126 the fork on the last `continue', and by that time the
3127 breakpoint at "foo" is long gone from the breakpoint table.
3128 If we vforked, then we don't need to unpatch here, since both
3129 parent and child are sharing the same memory pages; we'll
3130 need to unpatch at follow/detach time instead to be certain
3131 that new breakpoints added between catchpoint hit time and
3132 vfork follow are detached. */
3133 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3134 {
3135 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3136
3137 /* This won't actually modify the breakpoint list, but will
3138 physically remove the breakpoints from the child. */
3139 detach_breakpoints (child_pid);
3140 }
3141
3142 /* In case the event is caught by a catchpoint, remember that
3143 the event is to be followed at the next resume of the thread,
3144 and not immediately. */
3145 ecs->event_thread->pending_follow = ecs->ws;
3146
3147 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3148
3149 ecs->event_thread->stop_bpstat
3150 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3151 stop_pc, ecs->ptid);
3152
3153 /* Note that we're interested in knowing whether the bpstat actually
3154 causes a stop, not just if it may explain the signal.
3155 Software watchpoints, for example, always appear in the
3156 bpstat. */
3157 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3158
3159 /* If no catchpoint triggered for this, then keep going. */
3160 if (ecs->random_signal)
3161 {
3162 ptid_t parent;
3163 ptid_t child;
3164 int should_resume;
3165 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3166
3167 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3168
3169 should_resume = follow_fork ();
3170
3171 parent = ecs->ptid;
3172 child = ecs->ws.value.related_pid;
3173
3174 /* In non-stop mode, also resume the other branch. */
3175 if (non_stop && !detach_fork)
3176 {
3177 if (follow_child)
3178 switch_to_thread (parent);
3179 else
3180 switch_to_thread (child);
3181
3182 ecs->event_thread = inferior_thread ();
3183 ecs->ptid = inferior_ptid;
3184 keep_going (ecs);
3185 }
3186
3187 if (follow_child)
3188 switch_to_thread (child);
3189 else
3190 switch_to_thread (parent);
3191
3192 ecs->event_thread = inferior_thread ();
3193 ecs->ptid = inferior_ptid;
3194
3195 if (should_resume)
3196 keep_going (ecs);
3197 else
3198 stop_stepping (ecs);
3199 return;
3200 }
3201 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3202 goto process_event_stop_test;
3203
3204 case TARGET_WAITKIND_VFORK_DONE:
3205 /* Done with the shared memory region. Re-insert breakpoints in
3206 the parent, and keep going. */
3207
3208 if (debug_infrun)
3209 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3210
3211 if (!ptid_equal (ecs->ptid, inferior_ptid))
3212 context_switch (ecs->ptid);
3213
3214 current_inferior ()->waiting_for_vfork_done = 0;
3215 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3216 /* This also takes care of reinserting breakpoints in the
3217 previously locked inferior. */
3218 keep_going (ecs);
3219 return;
3220
3221 case TARGET_WAITKIND_EXECD:
3222 if (debug_infrun)
3223 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3224
3225 if (!ptid_equal (ecs->ptid, inferior_ptid))
3226 {
3227 context_switch (ecs->ptid);
3228 reinit_frame_cache ();
3229 }
3230
3231 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3232
3233 /* Do whatever is necessary to the parent branch of the vfork. */
3234 handle_vfork_child_exec_or_exit (1);
3235
3236 /* This causes the eventpoints and symbol table to be reset.
3237 Must do this now, before trying to determine whether to
3238 stop. */
3239 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3240
3241 ecs->event_thread->stop_bpstat
3242 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3243 stop_pc, ecs->ptid);
3244 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3245
3246 /* Note that this may be referenced from inside
3247 bpstat_stop_status above, through inferior_has_execd. */
3248 xfree (ecs->ws.value.execd_pathname);
3249 ecs->ws.value.execd_pathname = NULL;
3250
3251 /* If no catchpoint triggered for this, then keep going. */
3252 if (ecs->random_signal)
3253 {
3254 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3255 keep_going (ecs);
3256 return;
3257 }
3258 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3259 goto process_event_stop_test;
3260
3261 /* Be careful not to try to gather much state about a thread
3262 that's in a syscall. It's frequently a losing proposition. */
3263 case TARGET_WAITKIND_SYSCALL_ENTRY:
3264 if (debug_infrun)
3265 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3266 /* Get the current syscall number. */
3267 if (handle_syscall_event (ecs) != 0)
3268 return;
3269 goto process_event_stop_test;
3270
3271 /* Before examining the threads further, step this thread to
3272 get it entirely out of the syscall. (We get notice of the
3273 event when the thread is just on the verge of exiting a
3274 syscall. Stepping one instruction seems to get it back
3275 into user code.) */
3276 case TARGET_WAITKIND_SYSCALL_RETURN:
3277 if (debug_infrun)
3278 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3279 if (handle_syscall_event (ecs) != 0)
3280 return;
3281 goto process_event_stop_test;
3282
3283 case TARGET_WAITKIND_STOPPED:
3284 if (debug_infrun)
3285 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3286 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3287 break;
3288
3289 case TARGET_WAITKIND_NO_HISTORY:
3290 /* Reverse execution: target ran out of history info. */
3291 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3292 print_stop_reason (NO_HISTORY, 0);
3293 stop_stepping (ecs);
3294 return;
3295 }
3296
3297 if (ecs->new_thread_event)
3298 {
3299 if (non_stop)
3300 /* Non-stop assumes that the target handles adding new threads
3301 to the thread list. */
3302 internal_error (__FILE__, __LINE__, "\
3303 targets should add new threads to the thread list themselves in non-stop mode.");
3304
3305 /* We may want to consider not doing a resume here in order to
3306 give the user a chance to play with the new thread. It might
3307 be good to make that a user-settable option. */
3308
3309 /* At this point, all threads are stopped (happens automatically
3310 in either the OS or the native code). Therefore we need to
3311 continue all threads in order to make progress. */
3312
3313 if (!ptid_equal (ecs->ptid, inferior_ptid))
3314 context_switch (ecs->ptid);
3315 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3316 prepare_to_wait (ecs);
3317 return;
3318 }
3319
3320 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3321 {
3322 /* Do we need to clean up the state of a thread that has
3323 completed a displaced single-step? (Doing so usually affects
3324 the PC, so do it here, before we set stop_pc.) */
3325 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3326
3327 /* If we either finished a single-step or hit a breakpoint, but
3328 the user wanted this thread to be stopped, pretend we got a
3329 SIG0 (generic unsignaled stop). */
3330
3331 if (ecs->event_thread->stop_requested
3332 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3333 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3334 }
3335
3336 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3337
3338 if (debug_infrun)
3339 {
3340 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3341 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3342 struct cleanup *old_chain = save_inferior_ptid ();
3343
3344 inferior_ptid = ecs->ptid;
3345
3346 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3347 paddress (gdbarch, stop_pc));
3348 if (target_stopped_by_watchpoint ())
3349 {
3350 CORE_ADDR addr;
3351 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3352
3353 if (target_stopped_data_address (&current_target, &addr))
3354 fprintf_unfiltered (gdb_stdlog,
3355 "infrun: stopped data address = %s\n",
3356 paddress (gdbarch, addr));
3357 else
3358 fprintf_unfiltered (gdb_stdlog,
3359 "infrun: (no data address available)\n");
3360 }
3361
3362 do_cleanups (old_chain);
3363 }
3364
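/* A previous event made us hop threads to step some other thread past
   a software single-step breakpoint (see the thread-hop code further
   below). If that is what just finished, switch back to the thread we
   were originally single-stepping and resume it. */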
3365 if (stepping_past_singlestep_breakpoint)
3366 {
3367 gdb_assert (singlestep_breakpoints_inserted_p);
3368 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3369 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3370
3371 stepping_past_singlestep_breakpoint = 0;
3372
3373 /* We've either finished single-stepping past the single-step
3374 breakpoint, or stopped for some other reason. It would be nice if
3375 we could tell, but we can't reliably. */
3376 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3377 {
3378 if (debug_infrun)
3379 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3380 /* Pull the single step breakpoints out of the target. */
3381 remove_single_step_breakpoints ();
3382 singlestep_breakpoints_inserted_p = 0;
3383
3384 ecs->random_signal = 0;
3385 ecs->event_thread->trap_expected = 0;
3386
3387 context_switch (saved_singlestep_ptid);
3388 if (deprecated_context_hook)
3389 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3390
3391 resume (1, TARGET_SIGNAL_0);
3392 prepare_to_wait (ecs);
3393 return;
3394 }
3395 }
3396
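/* A non-null deferred_step_ptid means GDB was waiting for a
   single-step to finish before switching back to that thread. If the
   single-step just completed, do the switch now and resume the
   deferred thread's step. */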
3397 if (!ptid_equal (deferred_step_ptid, null_ptid))
3398 {
3399 /* In non-stop mode, there's never a deferred_step_ptid set. */
3400 gdb_assert (!non_stop);
3401
3402 /* If we stopped for some other reason than single-stepping, ignore
3403 the fact that we were supposed to switch back. */
3404 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3405 {
3406 if (debug_infrun)
3407 fprintf_unfiltered (gdb_stdlog,
3408 "infrun: handling deferred step\n");
3409
3410 /* Pull the single step breakpoints out of the target. */
3411 if (singlestep_breakpoints_inserted_p)
3412 {
3413 remove_single_step_breakpoints ();
3414 singlestep_breakpoints_inserted_p = 0;
3415 }
3416
3417 /* Note: We do not call context_switch at this point, as the
3418 context is already set up for stepping the original thread. */
3419 switch_to_thread (deferred_step_ptid);
3420 deferred_step_ptid = null_ptid;
3421 /* Suppress spurious "Switching to ..." message. */
3422 previous_inferior_ptid = inferior_ptid;
3423
3424 resume (1, TARGET_SIGNAL_0);
3425 prepare_to_wait (ecs);
3426 return;
3427 }
3428
3429 deferred_step_ptid = null_ptid;
3430 }
3431
3432 /* See if a thread hit a thread-specific breakpoint that was meant for
3433 another thread. If so, then step that thread past the breakpoint,
3434 and continue it. */
3435
3436 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3437 {
3438 int thread_hop_needed = 0;
3439 struct address_space *aspace =
3440 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3441
3442 /* Check if a regular breakpoint has been hit before checking
3443 for a potential single step breakpoint. Otherwise, GDB will
3444 not see this breakpoint hit when stepping onto breakpoints. */
3445 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3446 {
3447 ecs->random_signal = 0;
3448 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3449 thread_hop_needed = 1;
3450 }
3451 else if (singlestep_breakpoints_inserted_p)
3452 {
3453 /* We have not context switched yet, so this should be true
3454 no matter which thread hit the singlestep breakpoint. */
3455 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3456 if (debug_infrun)
3457 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3458 "trap for %s\n",
3459 target_pid_to_str (ecs->ptid));
3460
3461 ecs->random_signal = 0;
3462 /* The call to in_thread_list is necessary because PTIDs sometimes
3463 change when we go from single-threaded to multi-threaded. If
3464 the singlestep_ptid is still in the list, assume that it is
3465 really different from ecs->ptid. */
3466 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3467 && in_thread_list (singlestep_ptid))
3468 {
3469 /* If the PC of the thread we were trying to single-step
3470 has changed, discard this event (which we were going
3471 to ignore anyway), and pretend we saw that thread
3472 trap. This prevents us continuously moving the
3473 single-step breakpoint forward, one instruction at a
3474 time. If the PC has changed, then the thread we were
3475 trying to single-step has trapped or been signalled,
3476 but the event has not been reported to GDB yet.
3477
3478 There might be some cases where this loses signal
3479 information, if a signal has arrived at exactly the
3480 same time that the PC changed, but this is the best
3481 we can do with the information available. Perhaps we
3482 should arrange to report all events for all threads
3483 when they stop, or to re-poll the remote looking for
3484 this particular thread (i.e. temporarily enable
3485 schedlock). */
3486
3487 CORE_ADDR new_singlestep_pc
3488 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3489
3490 if (new_singlestep_pc != singlestep_pc)
3491 {
3492 enum target_signal stop_signal;
3493
3494 if (debug_infrun)
3495 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3496 " but expected thread advanced also\n");
3497
3498 /* The current context still belongs to
3499 singlestep_ptid. Don't swap here, since that's
3500 the context we want to use. Just fudge our
3501 state and continue. */
3502 stop_signal = ecs->event_thread->stop_signal;
3503 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3504 ecs->ptid = singlestep_ptid;
3505 ecs->event_thread = find_thread_ptid (ecs->ptid);
3506 ecs->event_thread->stop_signal = stop_signal;
3507 stop_pc = new_singlestep_pc;
3508 }
3509 else
3510 {
3511 if (debug_infrun)
3512 fprintf_unfiltered (gdb_stdlog,
3513 "infrun: unexpected thread\n");
3514
3515 thread_hop_needed = 1;
3516 stepping_past_singlestep_breakpoint = 1;
3517 saved_singlestep_ptid = singlestep_ptid;
3518 }
3519 }
3520 }
3521
3522 if (thread_hop_needed)
3523 {
3524 struct regcache *thread_regcache;
3525 int remove_status = 0;
3526
3527 if (debug_infrun)
3528 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3529
3530 /* Switch context before touching inferior memory, the
3531 previous thread may have exited. */
3532 if (!ptid_equal (inferior_ptid, ecs->ptid))
3533 context_switch (ecs->ptid);
3534
3535 /* Saw a breakpoint, but it was hit by the wrong thread.
3536 Just continue. */
3537
3538 if (singlestep_breakpoints_inserted_p)
3539 {
3540 /* Pull the single step breakpoints out of the target. */
3541 remove_single_step_breakpoints ();
3542 singlestep_breakpoints_inserted_p = 0;
3543 }
3544
3545 /* If the arch can displace step, don't remove the
3546 breakpoints. */
3547 thread_regcache = get_thread_regcache (ecs->ptid);
3548 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3549 remove_status = remove_breakpoints ();
3550
3551 /* Did we fail to remove breakpoints? If so, try
3552 to set the PC past the bp. (There's at least
3553 one situation in which we can fail to remove
3554 the bp's: On HP-UX's that use ttrace, we can't
3555 change the address space of a vforking child
3556 process until the child exits (well, okay, not
3557 then either :-) or execs.) */
3558 if (remove_status != 0)
3559 error (_("Cannot step over breakpoint hit in wrong thread"));
3560 else
3561 { /* Single step */
3562 if (!non_stop)
3563 {
3564 /* Only need to require the next event from this
3565 thread in all-stop mode. */
3566 waiton_ptid = ecs->ptid;
3567 infwait_state = infwait_thread_hop_state;
3568 }
3569
3570 ecs->event_thread->stepping_over_breakpoint = 1;
3571 keep_going (ecs);
3572 return;
3573 }
3574 }
3575 else if (singlestep_breakpoints_inserted_p)
3576 {
3577 sw_single_step_trap_p = 1;
3578 ecs->random_signal = 0;
3579 }
3580 }
3581 else
3582 ecs->random_signal = 1;
3583
3584 /* See if something interesting happened to the non-current thread. If
3585 so, then switch to that thread. */
3586 if (!ptid_equal (ecs->ptid, inferior_ptid))
3587 {
3588 if (debug_infrun)
3589 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3590
3591 context_switch (ecs->ptid);
3592
3593 if (deprecated_context_hook)
3594 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3595 }
3596
3597 /* At this point, get hold of the now-current thread's frame. */
3598 frame = get_current_frame ();
3599 gdbarch = get_frame_arch (frame);
3600
3601 if (singlestep_breakpoints_inserted_p)
3602 {
3603 /* Pull the single step breakpoints out of the target. */
3604 remove_single_step_breakpoints ();
3605 singlestep_breakpoints_inserted_p = 0;
3606 }
3607
3608 if (stepped_after_stopped_by_watchpoint)
3609 stopped_by_watchpoint = 0;
3610 else
3611 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3612
3613 /* If necessary, step over this watchpoint. We'll be back to display
3614 it in a moment. */
3615 if (stopped_by_watchpoint
3616 && (target_have_steppable_watchpoint
3617 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3618 {
3619 /* At this point, we are stopped at an instruction which has
3620 attempted to write to a piece of memory under control of
3621 a watchpoint. The instruction hasn't actually executed
3622 yet. If we were to evaluate the watchpoint expression
3623 now, we would get the old value, and therefore no change
3624 would seem to have occurred.
3625
3626 In order to make watchpoints work `right', we really need
3627 to complete the memory write, and then evaluate the
3628 watchpoint expression. We do this by single-stepping the
3629 target.
3630
3631 It may not be necessary to disable the watchpoint to step over
3632 it. For example, the PA can (with some kernel cooperation)
3633 single step over a watchpoint without disabling the watchpoint.
3634
3635 It is far more common to need to disable a watchpoint to step
3636 the inferior over it. If we have non-steppable watchpoints,
3637 we must disable the current watchpoint; it's simplest to
3638 disable all watchpoints and breakpoints. */
3639 int hw_step = 1;
3640
3641 if (!target_have_steppable_watchpoint)
3642 remove_breakpoints ();
3643 /* Single step */
3644 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3645 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3646 waiton_ptid = ecs->ptid;
3647 if (target_have_steppable_watchpoint)
3648 infwait_state = infwait_step_watch_state;
3649 else
3650 infwait_state = infwait_nonstep_watch_state;
3651 prepare_to_wait (ecs);
3652 return;
3653 }
3654
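/* Look up the function containing the stop PC and reset the
   per-event stop state before classifying this event. */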
3655 ecs->stop_func_start = 0;
3656 ecs->stop_func_end = 0;
3657 ecs->stop_func_name = 0;
3658 /* Don't care about return value; stop_func_start and stop_func_name
3659 will both be 0 if it doesn't work. */
3660 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3661 &ecs->stop_func_start, &ecs->stop_func_end);
3662 ecs->stop_func_start
3663 += gdbarch_deprecated_function_start_offset (gdbarch);
3664 ecs->event_thread->stepping_over_breakpoint = 0;
3665 bpstat_clear (&ecs->event_thread->stop_bpstat);
3666 ecs->event_thread->stop_step = 0;
3667 stop_print_frame = 1;
3668 ecs->random_signal = 0;
3669 stopped_by_random_signal = 0;
3670
3671 /* Hide inlined functions starting here, unless we just performed stepi or
3672 nexti. After stepi and nexti, always show the innermost frame (not any
3673 inline function call sites). */
3674 if (ecs->event_thread->step_range_end != 1)
3675 skip_inline_frames (ecs->ptid);
3676
3677 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3678 && ecs->event_thread->trap_expected
3679 && gdbarch_single_step_through_delay_p (gdbarch)
3680 && currently_stepping (ecs->event_thread))
3681 {
3682 /* We're trying to step off a breakpoint. Turns out that we're
3683 also on an instruction that needs to be stepped multiple
3684 times before it has fully executed. E.g., architectures
3685 with a delay slot. It needs to be stepped twice, once for
3686 the instruction and once for the delay slot. */
3687 int step_through_delay
3688 = gdbarch_single_step_through_delay (gdbarch, frame);
3689 if (debug_infrun && step_through_delay)
3690 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3691 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3692 {
3693 /* The user issued a continue when stopped at a breakpoint.
3694 Set up for another trap and get out of here. */
3695 ecs->event_thread->stepping_over_breakpoint = 1;
3696 keep_going (ecs);
3697 return;
3698 }
3699 else if (step_through_delay)
3700 {
3701 /* The user issued a step when stopped at a breakpoint.
3702 Maybe we should stop, maybe we should not - the delay
3703 slot *might* correspond to a line of source. In any
3704 case, don't decide that here, just set
3705 ecs->stepping_over_breakpoint, making sure we
3706 single-step again before breakpoints are re-inserted. */
3707 ecs->event_thread->stepping_over_breakpoint = 1;
3708 }
3709 }
3710
3711 /* Look at the cause of the stop, and decide what to do.
3712 The alternatives are:
3713 1) stop_stepping and return; to really stop and return to the debugger,
3714 2) keep_going and return to start up again
3715 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3716 3) set ecs->random_signal to 1, and the decision between 1 and 2
3717 will be made according to the signal handling tables. */
3718
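/* Only a SIGTRAP, or a stop that stop_soon asks us to handle
   quietly, can have been caused by a breakpoint, watchpoint or
   single-step; everything else is a random signal. */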
3719 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3720 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3721 || stop_soon == STOP_QUIETLY_REMOTE)
3722 {
3723 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3724 {
3725 if (debug_infrun)
3726 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3727 stop_print_frame = 0;
3728 stop_stepping (ecs);
3729 return;
3730 }
3731
3732 /* This originates from start_remote(), start_inferior() and
3733 shared library hook functions. */
3734 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3735 {
3736 if (debug_infrun)
3737 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3738 stop_stepping (ecs);
3739 return;
3740 }
3741
3742 /* This originates from attach_command(). We need to overwrite
3743 the stop_signal here, because some kernels don't ignore a
3744 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3745 See more comments in inferior.h. On the other hand, if we
3746 get a non-SIGSTOP, report it to the user - assume the backend
3747 will handle the SIGSTOP if it should show up later.
3748
3749 Also consider that the attach is complete when we see a
3750 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3751 target extended-remote report it instead of a SIGSTOP
3752 (e.g. gdbserver). We already rely on SIGTRAP being our
3753 signal, so this is no exception.
3754
3755 Also consider that the attach is complete when we see a
3756 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3757 the target to stop all threads of the inferior, in case the
3758 low level attach operation doesn't stop them implicitly. If
3759 they weren't stopped implicitly, then the stub will report a
3760 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3761 other than GDB's request. */
3762 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3763 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3764 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3765 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3766 {
3767 stop_stepping (ecs);
3768 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3769 return;
3770 }
3771
3772 /* See if there is a breakpoint at the current PC. */
3773 ecs->event_thread->stop_bpstat
3774 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3775 stop_pc, ecs->ptid);
3776
3777 /* Re-set this in case a breakpoint condition called a
3778 function. */
3779 stop_print_frame = 1;
3780
3781 /* This is where we handle "moribund" watchpoints. Unlike
3782 software breakpoints traps, hardware watchpoint traps are
3783 always distinguishable from random traps. If no high-level
3784 watchpoint is associated with the reported stop data address
3785 anymore, then the bpstat does not explain the signal ---
3786 simply make sure to ignore it if `stopped_by_watchpoint' is
3787 set. */
3788
3789 if (debug_infrun
3790 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3791 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3792 && stopped_by_watchpoint)
3793 fprintf_unfiltered (gdb_stdlog, "\
3794 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3795
3796 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3797 at one stage in the past included checks for an inferior
3798 function call's call dummy's return breakpoint. The original
3799 comment, that went with the test, read:
3800
3801 ``End of a stack dummy. Some systems (e.g. Sony news) give
3802 another signal besides SIGTRAP, so check here as well as
3803 above.''
3804
3805 If someone ever tries to get call dummies on a
3806 non-executable stack to work (where the target would stop
3807 with something like a SIGSEGV), then those tests might need
3808 to be re-instated. Given, however, that the tests were only
3809 enabled when momentary breakpoints were not being used, I
3810 suspect that it won't be the case.
3811
3812 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3813 be necessary for call dummies on a non-executable stack on
3814 SPARC. */
3815
3816 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3817 ecs->random_signal
3818 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3819 || stopped_by_watchpoint
3820 || ecs->event_thread->trap_expected
3821 || (ecs->event_thread->step_range_end
3822 && ecs->event_thread->step_resume_breakpoint == NULL));
3823 else
3824 {
3825 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3826 if (!ecs->random_signal)
3827 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3828 }
3829 }
3830
3831 /* When we reach this point, we've pretty much decided
3832 that the reason for stopping must've been a random
3833 (unexpected) signal. */
3834
3835 else
3836 ecs->random_signal = 1;
3837
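/* The fork, exec and syscall cases above jump here so that their
   events go through the same stepping and signal checks as an
   ordinary trap. */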
3838 process_event_stop_test:
3839
3840 /* Re-fetch current thread's frame in case we did a
3841 "goto process_event_stop_test" above. */
3842 frame = get_current_frame ();
3843 gdbarch = get_frame_arch (frame);
3844
3845 /* For the program's own signals, act according to
3846 the signal handling tables. */
3847
3848 if (ecs->random_signal)
3849 {
3850 /* Signal not for debugging purposes. */
3851 int printed = 0;
3852 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3853
3854 if (debug_infrun)
3855 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3856 ecs->event_thread->stop_signal);
3857
3858 stopped_by_random_signal = 1;
3859
3860 if (signal_print[ecs->event_thread->stop_signal])
3861 {
3862 printed = 1;
3863 target_terminal_ours_for_output ();
3864 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3865 }
3866 /* Always stop on signals if we're either just gaining control
3867 of the program, or the user explicitly requested this thread
3868 to remain stopped. */
3869 if (stop_soon != NO_STOP_QUIETLY
3870 || ecs->event_thread->stop_requested
3871 || (!inf->detaching
3872 && signal_stop_state (ecs->event_thread->stop_signal)))
3873 {
3874 stop_stepping (ecs);
3875 return;
3876 }
3877 /* If not going to stop, give terminal back
3878 if we took it away. */
3879 else if (printed)
3880 target_terminal_inferior ();
3881
3882 /* Clear the signal if it should not be passed. */
3883 if (signal_program[ecs->event_thread->stop_signal] == 0)
3884 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3885
3886 if (ecs->event_thread->prev_pc == stop_pc
3887 && ecs->event_thread->trap_expected
3888 && ecs->event_thread->step_resume_breakpoint == NULL)
3889 {
3890 /* We were just starting a new sequence, attempting to
3891 single-step off of a breakpoint and expecting a SIGTRAP.
3892 Instead this signal arrives. This signal will take us out
3893 of the stepping range so GDB needs to remember to, when
3894 the signal handler returns, resume stepping off that
3895 breakpoint. */
3896 /* To simplify things, "continue" is forced to use the same
3897 code paths as single-step - set a breakpoint at the
3898 signal return address and then, once hit, step off that
3899 breakpoint. */
3900 if (debug_infrun)
3901 fprintf_unfiltered (gdb_stdlog,
3902 "infrun: signal arrived while stepping over "
3903 "breakpoint\n");
3904
3905 insert_step_resume_breakpoint_at_frame (frame);
3906 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3907 keep_going (ecs);
3908 return;
3909 }
3910
3911 if (ecs->event_thread->step_range_end != 0
3912 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3913 && (ecs->event_thread->step_range_start <= stop_pc
3914 && stop_pc < ecs->event_thread->step_range_end)
3915 && frame_id_eq (get_stack_frame_id (frame),
3916 ecs->event_thread->step_stack_frame_id)
3917 && ecs->event_thread->step_resume_breakpoint == NULL)
3918 {
3919 /* The inferior is about to take a signal that will take it
3920 out of the single step range. Set a breakpoint at the
3921 current PC (which is presumably where the signal handler
3922 will eventually return) and then allow the inferior to
3923 run free.
3924
3925 Note that this is only needed for a signal delivered
3926 while in the single-step range. Nested signals aren't a
3927 problem as they eventually all return. */
3928 if (debug_infrun)
3929 fprintf_unfiltered (gdb_stdlog,
3930 "infrun: signal may take us out of "
3931 "single-step range\n");
3932
3933 insert_step_resume_breakpoint_at_frame (frame);
3934 keep_going (ecs);
3935 return;
3936 }
3937
3938 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3939 when either there's a nested signal, or when there's a
3940 pending signal enabled just as the signal handler returns
3941 (leaving the inferior at the step-resume-breakpoint without
3942 actually executing it). Either way continue until the
3943 breakpoint is really hit. */
3944 keep_going (ecs);
3945 return;
3946 }
3947
3948 /* Handle cases caused by hitting a breakpoint. */
3949 {
3950 CORE_ADDR jmp_buf_pc;
3951 struct bpstat_what what;
3952
3953 what = bpstat_what (ecs->event_thread->stop_bpstat);
3954
3955 if (what.call_dummy)
3956 {
3957 stop_stack_dummy = what.call_dummy;
3958 }
3959
3960 switch (what.main_action)
3961 {
3962 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3963 /* If we hit the breakpoint at longjmp while stepping, we
3964 install a momentary breakpoint at the target of the
3965 jmp_buf. */
3966
3967 if (debug_infrun)
3968 fprintf_unfiltered (gdb_stdlog,
3969 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3970
3971 ecs->event_thread->stepping_over_breakpoint = 1;
3972
3973 if (!gdbarch_get_longjmp_target_p (gdbarch)
3974 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3975 {
3976 if (debug_infrun)
3977 fprintf_unfiltered (gdb_stdlog, "\
3978 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3979 keep_going (ecs);
3980 return;
3981 }
3982
3983 /* We're going to replace the current step-resume breakpoint
3984 with a longjmp-resume breakpoint. */
3985 delete_step_resume_breakpoint (ecs->event_thread);
3986
3987 /* Insert a breakpoint at resume address. */
3988 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3989
3990 keep_going (ecs);
3991 return;
3992
3993 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3994 if (debug_infrun)
3995 fprintf_unfiltered (gdb_stdlog,
3996 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3997
3998 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3999 delete_step_resume_breakpoint (ecs->event_thread);
4000
4001 ecs->event_thread->stop_step = 1;
4002 print_stop_reason (END_STEPPING_RANGE, 0);
4003 stop_stepping (ecs);
4004 return;
4005
4006 case BPSTAT_WHAT_SINGLE:
4007 if (debug_infrun)
4008 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4009 ecs->event_thread->stepping_over_breakpoint = 1;
4010 /* Still need to check other stuff, at least the case
4011 where we are stepping and step out of the right range. */
4012 break;
4013
4014 case BPSTAT_WHAT_STOP_NOISY:
4015 if (debug_infrun)
4016 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4017 stop_print_frame = 1;
4018
4019 /* We are about to nuke the step_resume_breakpoint via the
4020 cleanup chain, so no need to worry about it here. */
4021
4022 stop_stepping (ecs);
4023 return;
4024
4025 case BPSTAT_WHAT_STOP_SILENT:
4026 if (debug_infrun)
4027 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4028 stop_print_frame = 0;
4029
4030 /* We are about to nuke the step_resume_breakpoint via the
4031 cleanup chain, so no need to worry about it here. */
4032
4033 stop_stepping (ecs);
4034 return;
4035
4036 case BPSTAT_WHAT_STEP_RESUME:
4037 if (debug_infrun)
4038 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4039
4040 delete_step_resume_breakpoint (ecs->event_thread);
4041 if (ecs->event_thread->step_after_step_resume_breakpoint)
4042 {
4043 /* Back when the step-resume breakpoint was inserted, we
4044 were trying to single-step off a breakpoint. Go back
4045 to doing that. */
4046 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4047 ecs->event_thread->stepping_over_breakpoint = 1;
4048 keep_going (ecs);
4049 return;
4050 }
4051 if (stop_pc == ecs->stop_func_start
4052 && execution_direction == EXEC_REVERSE)
4053 {
4054 /* We are stepping over a function call in reverse, and
4055 just hit the step-resume breakpoint at the start
4056 address of the function. Go back to single-stepping,
4057 which should take us back to the function call. */
4058 ecs->event_thread->stepping_over_breakpoint = 1;
4059 keep_going (ecs);
4060 return;
4061 }
4062 break;
4063
4064 case BPSTAT_WHAT_CHECK_SHLIBS:
4065 {
4066 if (debug_infrun)
4067 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4068
4069 /* Check for any newly added shared libraries if we're
4070 supposed to be adding them automatically. Switch
4071 terminal for any messages produced by
4072 breakpoint_re_set. */
4073 target_terminal_ours_for_output ();
4074 /* NOTE: cagney/2003-11-25: Make certain that the target
4075 stack's section table is kept up-to-date. Architectures,
4076 (e.g., PPC64), use the section table to perform
4077 operations such as address => section name and hence
4078 require the table to contain all sections (including
4079 those found in shared libraries). */
4080 #ifdef SOLIB_ADD
4081 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4082 #else
4083 solib_add (NULL, 0, &current_target, auto_solib_add);
4084 #endif
4085 target_terminal_inferior ();
4086
4087 /* If requested, stop when the dynamic linker notifies
4088 gdb of events. This allows the user to get control
4089 and place breakpoints in initializer routines for
4090 dynamically loaded objects (among other things). */
4091 if (stop_on_solib_events || stop_stack_dummy)
4092 {
4093 stop_stepping (ecs);
4094 return;
4095 }
4096 else
4097 {
4098 /* We want to step over this breakpoint, then keep going. */
4099 ecs->event_thread->stepping_over_breakpoint = 1;
4100 break;
4101 }
4102 }
4103 break;
4104
4105 case BPSTAT_WHAT_CHECK_JIT:
4106 if (debug_infrun)
4107 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4108
4109 /* Switch terminal for any messages produced by breakpoint_re_set. */
4110 target_terminal_ours_for_output ();
4111
4112 jit_event_handler (gdbarch);
4113
4114 target_terminal_inferior ();
4115
4116 /* We want to step over this breakpoint, then keep going. */
4117 ecs->event_thread->stepping_over_breakpoint = 1;
4118
4119 break;
4120
4121 case BPSTAT_WHAT_LAST:
4122 /* Not a real code, but listed here to shut up gcc -Wall. */
4123
4124 case BPSTAT_WHAT_KEEP_CHECKING:
4125 break;
4126 }
4127 }
4128
4129 /* We come here if we hit a breakpoint but should not
4130 stop for it. Possibly we also were stepping
4131 and should stop for that. So fall through and
4132 test for stepping. But, if not stepping,
4133 do not stop. */
4134
4135 /* In all-stop mode, if we're currently stepping but have stopped in
4136 some other thread, we need to switch back to the stepped thread. */
4137 if (!non_stop)
4138 {
4139 struct thread_info *tp;
4140 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4141 ecs->event_thread);
4142 if (tp)
4143 {
4144 /* However, if the current thread is blocked on some internal
4145 breakpoint, and we simply need to step over that breakpoint
4146 to get it going again, do that first. */
4147 if ((ecs->event_thread->trap_expected
4148 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4149 || ecs->event_thread->stepping_over_breakpoint)
4150 {
4151 keep_going (ecs);
4152 return;
4153 }
4154
4155 /* If the stepping thread exited, then don't try to switch
4156 back and resume it, which could fail in several different
4157 ways depending on the target. Instead, just keep going.
4158
4159 We can find a stepping dead thread in the thread list in
4160 two cases:
4161
4162 - The target supports thread exit events, and when the
4163 target tries to delete the thread from the thread list,
4164 inferior_ptid pointed at the exiting thread. In such
4165 case, calling delete_thread does not really remove the
4166 thread from the list; instead, the thread is left listed,
4167 with 'exited' state.
4168
4169 - The target's debug interface does not support thread
4170 exit events, and so we have no idea whatsoever if the
4171 previously stepping thread is still alive. For that
4172 reason, we need to synchronously query the target
4173 now. */
4174 if (is_exited (tp->ptid)
4175 || !target_thread_alive (tp->ptid))
4176 {
4177 if (debug_infrun)
4178 fprintf_unfiltered (gdb_stdlog, "\
4179 infrun: not switching back to stepped thread, it has vanished\n");
4180
4181 delete_thread (tp->ptid);
4182 keep_going (ecs);
4183 return;
4184 }
4185
4186 /* Otherwise, we no longer expect a trap in the current thread.
4187 Clear the trap_expected flag before switching back -- this is
4188 what keep_going would do as well, if we called it. */
4189 ecs->event_thread->trap_expected = 0;
4190
4191 if (debug_infrun)
4192 fprintf_unfiltered (gdb_stdlog,
4193 "infrun: switching back to stepped thread\n");
4194
4195 ecs->event_thread = tp;
4196 ecs->ptid = tp->ptid;
4197 context_switch (ecs->ptid);
4198 keep_going (ecs);
4199 return;
4200 }
4201 }
4202
4203 /* Are we stepping to get the inferior out of the dynamic linker's
4204 hook (and possibly the dld itself) after catching a shlib
4205 event? */
4206 if (ecs->event_thread->stepping_through_solib_after_catch)
4207 {
4208 #if defined(SOLIB_ADD)
4209 /* Have we reached our destination? If not, keep going. */
4210 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4211 {
4212 if (debug_infrun)
4213 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4214 ecs->event_thread->stepping_over_breakpoint = 1;
4215 keep_going (ecs);
4216 return;
4217 }
4218 #endif
4219 if (debug_infrun)
4220 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4221 /* Else, stop and report the catchpoint(s) whose triggering
4222 caused us to begin stepping. */
4223 ecs->event_thread->stepping_through_solib_after_catch = 0;
4224 bpstat_clear (&ecs->event_thread->stop_bpstat);
4225 ecs->event_thread->stop_bpstat
4226 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4227 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4228 stop_print_frame = 1;
4229 stop_stepping (ecs);
4230 return;
4231 }
4232
4233 if (ecs->event_thread->step_resume_breakpoint)
4234 {
4235 if (debug_infrun)
4236 fprintf_unfiltered (gdb_stdlog,
4237 "infrun: step-resume breakpoint is inserted\n");
4238
4239 /* Having a step-resume breakpoint overrides anything
4240 else having to do with stepping commands until
4241 that breakpoint is reached. */
4242 keep_going (ecs);
4243 return;
4244 }
4245
4246 if (ecs->event_thread->step_range_end == 0)
4247 {
4248 if (debug_infrun)
4249 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4250 /* Likewise if we aren't even stepping. */
4251 keep_going (ecs);
4252 return;
4253 }
4254
4255 /* Re-fetch current thread's frame in case the code above caused
4256 the frame cache to be re-initialized, making our FRAME variable
4257 a dangling pointer. */
4258 frame = get_current_frame ();
4259
4260 /* If stepping through a line, keep going if still within it.
4261
4262 Note that step_range_end is the address of the first instruction
4263 beyond the step range, and NOT the address of the last instruction
4264 within it!
4265
4266 Note also that during reverse execution, we may be stepping
4267 through a function epilogue and therefore must detect when
4268 the current-frame changes in the middle of a line. */
4269
4270 if (stop_pc >= ecs->event_thread->step_range_start
4271 && stop_pc < ecs->event_thread->step_range_end
4272 && (execution_direction != EXEC_REVERSE
4273 || frame_id_eq (get_frame_id (frame),
4274 ecs->event_thread->step_frame_id)))
4275 {
4276 if (debug_infrun)
4277 fprintf_unfiltered
4278 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4279 paddress (gdbarch, ecs->event_thread->step_range_start),
4280 paddress (gdbarch, ecs->event_thread->step_range_end));
4281
4282 /* When stepping backward, stop at beginning of line range
4283 (unless it's the function entry point, in which case
4284 keep going back to the call point). */
4285 if (stop_pc == ecs->event_thread->step_range_start
4286 && stop_pc != ecs->stop_func_start
4287 && execution_direction == EXEC_REVERSE)
4288 {
4289 ecs->event_thread->stop_step = 1;
4290 print_stop_reason (END_STEPPING_RANGE, 0);
4291 stop_stepping (ecs);
4292 }
4293 else
4294 keep_going (ecs);
4295
4296 return;
4297 }
4298
4299 /* We stepped out of the stepping range. */
4300
4301 /* If we are stepping at the source level and entered the runtime
4302 loader dynamic symbol resolution code...
4303
4304 EXEC_FORWARD: we keep on single stepping until we exit the run
4305 time loader code and reach the callee's address.
4306
4307 EXEC_REVERSE: we've already executed the callee (backward), and
4308 the runtime loader code is handled just like any other
4309 undebuggable function call. Now we need only keep stepping
4310 backward through the trampoline code, and that's handled further
4311 down, so there is nothing for us to do here. */
4312
4313 if (execution_direction != EXEC_REVERSE
4314 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4315 && in_solib_dynsym_resolve_code (stop_pc))
4316 {
4317 CORE_ADDR pc_after_resolver =
4318 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4319
4320 if (debug_infrun)
4321 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4322
4323 if (pc_after_resolver)
4324 {
4325 /* Set up a step-resume breakpoint at the address
4326 indicated by SKIP_SOLIB_RESOLVER. */
4327 struct symtab_and_line sr_sal;
4328 init_sal (&sr_sal);
4329 sr_sal.pc = pc_after_resolver;
4330 sr_sal.pspace = get_frame_program_space (frame);
4331
4332 insert_step_resume_breakpoint_at_sal (gdbarch,
4333 sr_sal, null_frame_id);
4334 }
4335
4336 keep_going (ecs);
4337 return;
4338 }
4339
4340 if (ecs->event_thread->step_range_end != 1
4341 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4342 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4343 && get_frame_type (frame) == SIGTRAMP_FRAME)
4344 {
4345 if (debug_infrun)
4346 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4347 /* The inferior, while doing a "step" or "next", has ended up in
4348 a signal trampoline (either by a signal being delivered or by
4349 the signal handler returning). Just single-step until the
4350 inferior leaves the trampoline (either by calling the handler
4351 or returning). */
4352 keep_going (ecs);
4353 return;
4354 }
4355
4356 /* Check for subroutine calls. The check for the current frame
4357 equalling the step ID is not necessary - the check of the
4358 previous frame's ID is sufficient - but it is a common case and
4359 cheaper than checking the previous frame's ID.
4360
4361 NOTE: frame_id_eq will never report two invalid frame IDs as
4362 being equal, so to get into this block, both the current and
4363 previous frame must have valid frame IDs. */
4364 /* The outer_frame_id check is a heuristic to detect stepping
4365 through startup code. If we step over an instruction which
4366 sets the stack pointer from an invalid value to a valid value,
4367 we may detect that as a subroutine call from the mythical
4368 "outermost" function. This could be fixed by marking
4369 outermost frames as !stack_p,code_p,special_p. Then the
4370 initial outermost frame, before sp was valid, would
4371 have code_addr == &_start. See the comment in frame_id_eq
4372 for more. */
4373 if (!frame_id_eq (get_stack_frame_id (frame),
4374 ecs->event_thread->step_stack_frame_id)
4375 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4376 ecs->event_thread->step_stack_frame_id)
4377 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4378 outer_frame_id)
4379 || step_start_function != find_pc_function (stop_pc))))
4380 {
4381 CORE_ADDR real_stop_pc;
4382
4383 if (debug_infrun)
4384 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4385
4386 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4387 || ((ecs->event_thread->step_range_end == 1)
4388 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4389 ecs->stop_func_start)))
4390 {
4391 /* I presume that step_over_calls is only 0 when we're
4392 supposed to be stepping at the assembly language level
4393 ("stepi"). Just stop. */
4394 /* Also, maybe we just did a "nexti" inside a prolog, so we
4395 thought it was a subroutine call but it was not. Stop as
4396 well. FENN */
4397 /* And this works the same backward as frontward. MVS */
4398 ecs->event_thread->stop_step = 1;
4399 print_stop_reason (END_STEPPING_RANGE, 0);
4400 stop_stepping (ecs);
4401 return;
4402 }
4403
4404 /* Reverse stepping through solib trampolines. */
4405
4406 if (execution_direction == EXEC_REVERSE
4407 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4408 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4409 || (ecs->stop_func_start == 0
4410 && in_solib_dynsym_resolve_code (stop_pc))))
4411 {
4412 /* Any solib trampoline code can be handled in reverse
4413 by simply continuing to single-step. We have already
4414 executed the solib function (backwards), and a few
4415 steps will take us back through the trampoline to the
4416 caller. */
4417 keep_going (ecs);
4418 return;
4419 }
4420
4421 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4422 {
4423 /* We're doing a "next".
4424
4425 Normal (forward) execution: set a breakpoint at the
4426 callee's return address (the address at which the caller
4427 will resume).
4428
4429 Reverse (backward) execution: set the step-resume
4430 breakpoint at the start of the function that we just
4431 stepped into (backwards), and continue to there. When we
4432 get there, we'll need to single-step back to the caller. */
4433
4434 if (execution_direction == EXEC_REVERSE)
4435 {
4436 struct symtab_and_line sr_sal;
4437
4438 /* Normal function call return (static or dynamic). */
4439 init_sal (&sr_sal);
4440 sr_sal.pc = ecs->stop_func_start;
4441 sr_sal.pspace = get_frame_program_space (frame);
4442 insert_step_resume_breakpoint_at_sal (gdbarch,
4443 sr_sal, null_frame_id);
4444 }
4445 else
4446 insert_step_resume_breakpoint_at_caller (frame);
4447
4448 keep_going (ecs);
4449 return;
4450 }
4451
4452 /* If we are in a function call trampoline (a stub between the
4453 calling routine and the real function), locate the real
4454 function. That's what tells us (a) whether we want to step
4455 into it at all, and (b) what prologue we want to run to the
4456 end of, if we do step into it. */
4457 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4458 if (real_stop_pc == 0)
4459 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4460 if (real_stop_pc != 0)
4461 ecs->stop_func_start = real_stop_pc;
4462
4463 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4464 {
4465 struct symtab_and_line sr_sal;
4466 init_sal (&sr_sal);
4467 sr_sal.pc = ecs->stop_func_start;
4468 sr_sal.pspace = get_frame_program_space (frame);
4469
4470 insert_step_resume_breakpoint_at_sal (gdbarch,
4471 sr_sal, null_frame_id);
4472 keep_going (ecs);
4473 return;
4474 }
4475
4476 /* If we have line number information for the function we are
4477 thinking of stepping into, step into it.
4478
4479 If there are several symtabs at that PC (e.g. with include
4480 files), just want to know whether *any* of them have line
4481 numbers. find_pc_line handles this. */
4482 {
4483 struct symtab_and_line tmp_sal;
4484
4485 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4486 tmp_sal.pspace = get_frame_program_space (frame);
4487 if (tmp_sal.line != 0)
4488 {
4489 if (execution_direction == EXEC_REVERSE)
4490 handle_step_into_function_backward (gdbarch, ecs);
4491 else
4492 handle_step_into_function (gdbarch, ecs);
4493 return;
4494 }
4495 }
4496
4497 /* If we have no line number and the step-stop-if-no-debug is
4498 set, we stop the step so that the user has a chance to switch
4499 to assembly mode. */
4500 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4501 && step_stop_if_no_debug)
4502 {
4503 ecs->event_thread->stop_step = 1;
4504 print_stop_reason (END_STEPPING_RANGE, 0);
4505 stop_stepping (ecs);
4506 return;
4507 }
4508
4509 if (execution_direction == EXEC_REVERSE)
4510 {
4511 /* Set a breakpoint at callee's start address.
4512 From there we can step once and be back in the caller. */
4513 struct symtab_and_line sr_sal;
4514 init_sal (&sr_sal);
4515 sr_sal.pc = ecs->stop_func_start;
4516 sr_sal.pspace = get_frame_program_space (frame);
4517 insert_step_resume_breakpoint_at_sal (gdbarch,
4518 sr_sal, null_frame_id);
4519 }
4520 else
4521 /* Set a breakpoint at callee's return address (the address
4522 at which the caller will resume). */
4523 insert_step_resume_breakpoint_at_caller (frame);
4524
4525 keep_going (ecs);
4526 return;
4527 }
4528
4529 /* Reverse stepping through solib trampolines. */
4530
4531 if (execution_direction == EXEC_REVERSE
4532 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4533 {
4534 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4535 || (ecs->stop_func_start == 0
4536 && in_solib_dynsym_resolve_code (stop_pc)))
4537 {
4538 /* Any solib trampoline code can be handled in reverse
4539 by simply continuing to single-step. We have already
4540 executed the solib function (backwards), and a few
4541 steps will take us back through the trampoline to the
4542 caller. */
4543 keep_going (ecs);
4544 return;
4545 }
4546 else if (in_solib_dynsym_resolve_code (stop_pc))
4547 {
4548 /* Stepped backward into the solib dynsym resolver.
4549 Set a breakpoint at its start and continue, then
4550 one more step will take us out. */
4551 struct symtab_and_line sr_sal;
4552 init_sal (&sr_sal);
4553 sr_sal.pc = ecs->stop_func_start;
4554 sr_sal.pspace = get_frame_program_space (frame);
4555 insert_step_resume_breakpoint_at_sal (gdbarch,
4556 sr_sal, null_frame_id);
4557 keep_going (ecs);
4558 return;
4559 }
4560 }
4561
4562 /* If we're in the return path from a shared library trampoline,
4563 we want to proceed through the trampoline when stepping. */
4564 if (gdbarch_in_solib_return_trampoline (gdbarch,
4565 stop_pc, ecs->stop_func_name))
4566 {
4567 /* Determine where this trampoline returns. */
4568 CORE_ADDR real_stop_pc;
4569 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4570
4571 if (debug_infrun)
4572 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4573
4574 /* Only proceed through if we know where it's going. */
4575 if (real_stop_pc)
4576 {
4577 /* And put the step-breakpoint there and go until there. */
4578 struct symtab_and_line sr_sal;
4579
4580 init_sal (&sr_sal); /* initialize to zeroes */
4581 sr_sal.pc = real_stop_pc;
4582 sr_sal.section = find_pc_overlay (sr_sal.pc);
4583 sr_sal.pspace = get_frame_program_space (frame);
4584
4585 /* Do not specify what the fp should be when we stop since
4586 on some machines the prologue is where the new fp value
4587 is established. */
4588 insert_step_resume_breakpoint_at_sal (gdbarch,
4589 sr_sal, null_frame_id);
4590
4591 /* Restart without fiddling with the step ranges or
4592 other state. */
4593 keep_going (ecs);
4594 return;
4595 }
4596 }
4597
4598 stop_pc_sal = find_pc_line (stop_pc, 0);
4599
4600 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4601 the trampoline processing logic; however, there are some trampolines
4602 that have no names, so we should do trampoline handling first. */
4603 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4604 && ecs->stop_func_name == NULL
4605 && stop_pc_sal.line == 0)
4606 {
4607 if (debug_infrun)
4608 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4609
4610 /* The inferior just stepped into, or returned to, an
4611 undebuggable function (where there is no debugging information
4612 and no line number corresponding to the address where the
4613 inferior stopped). Since we want to skip this kind of code,
4614 we keep going until the inferior returns from this
4615 function - unless the user has asked us not to (via
4616 set step-mode) or we no longer know how to get back
4617 to the call site. */
4618 if (step_stop_if_no_debug
4619 || !frame_id_p (frame_unwind_caller_id (frame)))
4620 {
4621 /* If we have no line number and the step-stop-if-no-debug
4622 is set, we stop the step so that the user has a chance to
4623 switch to assembly mode. */
4624 ecs->event_thread->stop_step = 1;
4625 print_stop_reason (END_STEPPING_RANGE, 0);
4626 stop_stepping (ecs);
4627 return;
4628 }
4629 else
4630 {
4631 /* Set a breakpoint at callee's return address (the address
4632 at which the caller will resume). */
4633 insert_step_resume_breakpoint_at_caller (frame);
4634 keep_going (ecs);
4635 return;
4636 }
4637 }
4638
4639 if (ecs->event_thread->step_range_end == 1)
4640 {
4641 /* It is stepi or nexti. We always want to stop stepping after
4642 one instruction. */
4643 if (debug_infrun)
4644 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4645 ecs->event_thread->stop_step = 1;
4646 print_stop_reason (END_STEPPING_RANGE, 0);
4647 stop_stepping (ecs);
4648 return;
4649 }
4650
4651 if (stop_pc_sal.line == 0)
4652 {
4653 /* We have no line number information. That means to stop
4654 stepping (does this always happen right after one instruction,
4655 when we do "s" in a function with no line numbers,
4656 or can this happen as a result of a return or longjmp?). */
4657 if (debug_infrun)
4658 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4659 ecs->event_thread->stop_step = 1;
4660 print_stop_reason (END_STEPPING_RANGE, 0);
4661 stop_stepping (ecs);
4662 return;
4663 }
4664
4665 /* Look for "calls" to inlined functions, part one. If the inline
4666 frame machinery detected some skipped call sites, we have entered
4667 a new inline function. */
4668
4669 if (frame_id_eq (get_frame_id (get_current_frame ()),
4670 ecs->event_thread->step_frame_id)
4671 && inline_skipped_frames (ecs->ptid))
4672 {
4673 struct symtab_and_line call_sal;
4674
4675 if (debug_infrun)
4676 fprintf_unfiltered (gdb_stdlog,
4677 "infrun: stepped into inlined function\n");
4678
4679 find_frame_sal (get_current_frame (), &call_sal);
4680
4681 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4682 {
4683 /* For "step", we're going to stop. But if the call site
4684 for this inlined function is on the same source line as
4685 we were previously stepping, go down into the function
4686 first. Otherwise stop at the call site. */
4687
4688 if (call_sal.line == ecs->event_thread->current_line
4689 && call_sal.symtab == ecs->event_thread->current_symtab)
4690 step_into_inline_frame (ecs->ptid);
4691
4692 ecs->event_thread->stop_step = 1;
4693 print_stop_reason (END_STEPPING_RANGE, 0);
4694 stop_stepping (ecs);
4695 return;
4696 }
4697 else
4698 {
4699 /* For "next", we should stop at the call site if it is on a
4700 different source line. Otherwise continue through the
4701 inlined function. */
4702 if (call_sal.line == ecs->event_thread->current_line
4703 && call_sal.symtab == ecs->event_thread->current_symtab)
4704 keep_going (ecs);
4705 else
4706 {
4707 ecs->event_thread->stop_step = 1;
4708 print_stop_reason (END_STEPPING_RANGE, 0);
4709 stop_stepping (ecs);
4710 }
4711 return;
4712 }
4713 }
4714
4715 /* Look for "calls" to inlined functions, part two. If we are still
4716 in the same real function we were stepping through, but we have
4717 to go further up to find the exact frame ID, we are stepping
4718 through a more inlined call beyond its call site. */
4719
4720 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4721 && !frame_id_eq (get_frame_id (get_current_frame ()),
4722 ecs->event_thread->step_frame_id)
4723 && stepped_in_from (get_current_frame (),
4724 ecs->event_thread->step_frame_id))
4725 {
4726 if (debug_infrun)
4727 fprintf_unfiltered (gdb_stdlog,
4728 "infrun: stepping through inlined function\n");
4729
4730 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4731 keep_going (ecs);
4732 else
4733 {
4734 ecs->event_thread->stop_step = 1;
4735 print_stop_reason (END_STEPPING_RANGE, 0);
4736 stop_stepping (ecs);
4737 }
4738 return;
4739 }
4740
4741 if ((stop_pc == stop_pc_sal.pc)
4742 && (ecs->event_thread->current_line != stop_pc_sal.line
4743 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4744 {
4745 /* We are at the start of a different line. So stop. Note that
4746 we don't stop if we step into the middle of a different line.
4747 That is said to make things like for (;;) statements work
4748 better. */
4749 if (debug_infrun)
4750 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4751 ecs->event_thread->stop_step = 1;
4752 print_stop_reason (END_STEPPING_RANGE, 0);
4753 stop_stepping (ecs);
4754 return;
4755 }
4756
4757 /* We aren't done stepping.
4758
4759 Optimize by setting the stepping range to the line.
4760 (We might not be in the original line, but if we entered a
4761 new line in mid-statement, we continue stepping. This makes
4762 things like for(;;) statements work better.) */
4763
4764 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4765 ecs->event_thread->step_range_end = stop_pc_sal.end;
4766 set_step_info (frame, stop_pc_sal);
4767
4768 if (debug_infrun)
4769 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4770 keep_going (ecs);
4771 }
4772
4773 /* Is thread TP in the middle of single-stepping? */
4774
4775 static int
4776 currently_stepping (struct thread_info *tp)
4777 {
4778 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4779 || tp->trap_expected
4780 || tp->stepping_through_solib_after_catch
4781 || bpstat_should_step ());
4782 }
4783
4784 /* Iterator callback: returns true if TP is in the middle of stepping
4785 or of handling a "next", unless TP is the thread passed in DATA. */
4786
4787 static int
4788 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4789 {
4790 if (tp == data)
4791 return 0;
4792
4793 return (tp->step_range_end
4794 || tp->trap_expected
4795 || tp->stepping_through_solib_after_catch);
4796 }
4797
4798 /* Inferior has stepped into a subroutine call with source code that
4799 we should not step over. Do step to the first line of code in
4800 it. */
4801
4802 static void
4803 handle_step_into_function (struct gdbarch *gdbarch,
4804 struct execution_control_state *ecs)
4805 {
4806 struct symtab *s;
4807 struct symtab_and_line stop_func_sal, sr_sal;
4808
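/* Skip the callee's prologue, unless it is assembly source, so that
   we stop at its first line of real code. */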
4809 s = find_pc_symtab (stop_pc);
4810 if (s && s->language != language_asm)
4811 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4812 ecs->stop_func_start);
4813
4814 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4815 /* Use the step_resume_break to step until the end of the prologue,
4816 even if that involves jumps (as it seems to on the vax under
4817 4.2). */
4818 /* If the prologue ends in the middle of a source line, continue to
4819 the end of that source line (if it is still within the function).
4820 Otherwise, just go to end of prologue. */
4821 if (stop_func_sal.end
4822 && stop_func_sal.pc != ecs->stop_func_start
4823 && stop_func_sal.end < ecs->stop_func_end)
4824 ecs->stop_func_start = stop_func_sal.end;
4825
4826 /* Architectures which require breakpoint adjustment might not be able
4827 to place a breakpoint at the computed address. If so, the test
4828 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4829 ecs->stop_func_start to an address at which a breakpoint may be
4830 legitimately placed.
4831
4832 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4833 made, GDB will enter an infinite loop when stepping through
4834 optimized code consisting of VLIW instructions which contain
4835 subinstructions corresponding to different source lines. On
4836 FR-V, it's not permitted to place a breakpoint on any but the
4837 first subinstruction of a VLIW instruction. When a breakpoint is
4838 set, GDB will adjust the breakpoint address to the beginning of
4839 the VLIW instruction. Thus, we need to make the corresponding
4840 adjustment here when computing the stop address. */
4841
4842 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4843 {
4844 ecs->stop_func_start
4845 = gdbarch_adjust_breakpoint_address (gdbarch,
4846 ecs->stop_func_start);
4847 }
4848
4849 if (ecs->stop_func_start == stop_pc)
4850 {
4851 /* We are already there: stop now. */
4852 ecs->event_thread->stop_step = 1;
4853 print_stop_reason (END_STEPPING_RANGE, 0);
4854 stop_stepping (ecs);
4855 return;
4856 }
4857 else
4858 {
4859 /* Put the step-breakpoint there and go until there. */
4860 init_sal (&sr_sal); /* initialize to zeroes */
4861 sr_sal.pc = ecs->stop_func_start;
4862 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4863 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4864
4865 /* Do not specify what the fp should be when we stop since on
4866 some machines the prologue is where the new fp value is
4867 established. */
4868 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4869
4870 /* And make sure stepping stops right away then. */
4871 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4872 }
4873 keep_going (ecs);
4874 }
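/* An illustrative sketch, for a hypothetical VLIW-style target, of
   the kind of gdbarch_adjust_breakpoint_address hook the FR-V note
   above describes.  EXAMPLE_BUNDLE_SIZE is an assumed constant, not
   anything GDB defines; the point is only that the address gets
   aligned down to the first slot of its bundle.  */
#if 0
#define EXAMPLE_BUNDLE_SIZE 8

static CORE_ADDR
example_adjust_breakpoint_address (struct gdbarch *gdbarch,
                                   CORE_ADDR bpaddr)
{
  /* Breakpoints may only be placed on the first subinstruction of a
     bundle, so align the requested address down.  */
  return bpaddr & ~(CORE_ADDR) (EXAMPLE_BUNDLE_SIZE - 1);
}
#endif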
4875
4876 /* Inferior has stepped backward into a subroutine call with source
4877 code that we should not step over. Do step to the beginning of the
4878 last line of code in it. */
4879
4880 static void
4881 handle_step_into_function_backward (struct gdbarch *gdbarch,
4882 struct execution_control_state *ecs)
4883 {
4884 struct symtab *s;
4885 struct symtab_and_line stop_func_sal;
4886
4887 s = find_pc_symtab (stop_pc);
4888 if (s && s->language != language_asm)
4889 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4890 ecs->stop_func_start);
4891
4892 stop_func_sal = find_pc_line (stop_pc, 0);
4893
4894 /* OK, we're just going to keep stepping here. */
4895 if (stop_func_sal.pc == stop_pc)
4896 {
4897 /* We're there already. Just stop stepping now. */
4898 ecs->event_thread->stop_step = 1;
4899 print_stop_reason (END_STEPPING_RANGE, 0);
4900 stop_stepping (ecs);
4901 }
4902 else
4903 {
4904 /* Else just reset the step range and keep going.
4905 No step-resume breakpoint, they don't work for
4906 epilogues, which can have multiple entry paths. */
4907 ecs->event_thread->step_range_start = stop_func_sal.pc;
4908 ecs->event_thread->step_range_end = stop_func_sal.end;
4909 keep_going (ecs);
4910 }
4911 return;
4912 }
4913
4914 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4915 This is used both to skip over functions and to skip over code. */
4916
4917 static void
4918 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4919 struct symtab_and_line sr_sal,
4920 struct frame_id sr_id)
4921 {
4922 /* There should never be more than one step-resume or longjmp-resume
4923 breakpoint per thread, so we should never be setting a new
4924 step_resume_breakpoint when one is already active. */
4925 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4926
4927 if (debug_infrun)
4928 fprintf_unfiltered (gdb_stdlog,
4929 "infrun: inserting step-resume breakpoint at %s\n",
4930 paddress (gdbarch, sr_sal.pc));
4931
4932 inferior_thread ()->step_resume_breakpoint
4933 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4934 }
4935
4936 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4937 to skip a potential signal handler.
4938
4939 This is called with the interrupted function's frame. The signal
4940 handler, when it returns, will resume the interrupted function at
4941 RETURN_FRAME.pc. */
4942
4943 static void
4944 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4945 {
4946 struct symtab_and_line sr_sal;
4947 struct gdbarch *gdbarch;
4948
4949 gdb_assert (return_frame != NULL);
4950 init_sal (&sr_sal); /* initialize to zeros */
4951
4952 gdbarch = get_frame_arch (return_frame);
4953 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4954 sr_sal.section = find_pc_overlay (sr_sal.pc);
4955 sr_sal.pspace = get_frame_program_space (return_frame);
4956
4957 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4958 get_stack_frame_id (return_frame));
4959 }
4960
4961 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
4962 inserts a breakpoint at the previous frame's PC. This is used to
4963 skip a function after stepping into it (for "next" or if the called
4964 function has no debugging information).
4965
4966 The current function has almost always been reached by single
4967 stepping a call or return instruction. NEXT_FRAME belongs to the
4968 current function, and the breakpoint will be set at the caller's
4969 resume address.
4970
4971 This is a separate function rather than reusing
4972 insert_step_resume_breakpoint_at_frame in order to avoid
4973 get_prev_frame, which may stop prematurely (see the implementation
4974 of frame_unwind_caller_id for an example). */
4975
4976 static void
4977 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4978 {
4979 struct symtab_and_line sr_sal;
4980 struct gdbarch *gdbarch;
4981
4982 /* We shouldn't have gotten here if we don't know where the call site
4983 is. */
4984 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4985
4986 init_sal (&sr_sal); /* initialize to zeros */
4987
4988 gdbarch = frame_unwind_caller_arch (next_frame);
4989 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4990 frame_unwind_caller_pc (next_frame));
4991 sr_sal.section = find_pc_overlay (sr_sal.pc);
4992 sr_sal.pspace = frame_unwind_program_space (next_frame);
4993
4994 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4995 frame_unwind_caller_id (next_frame));
4996 }
4997
4998 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4999 new breakpoint at the target of a jmp_buf. The handling of
5000 longjmp-resume uses the same mechanisms used for handling
5001 "step-resume" breakpoints. */
5002
5003 static void
5004 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5005 {
5006 /* There should never be more than one step-resume or longjmp-resume
5007 breakpoint per thread, so we should never be setting a new
5008 longjmp_resume_breakpoint when one is already active. */
5009 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5010
5011 if (debug_infrun)
5012 fprintf_unfiltered (gdb_stdlog,
5013 "infrun: inserting longjmp-resume breakpoint at %s\n",
5014 paddress (gdbarch, pc));
5015
5016 inferior_thread ()->step_resume_breakpoint =
5017 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5018 }
5019
5020 static void
5021 stop_stepping (struct execution_control_state *ecs)
5022 {
5023 if (debug_infrun)
5024 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5025
5026 /* Let callers know we don't want to wait for the inferior anymore. */
5027 ecs->wait_some_more = 0;
5028 }
5029
5030 /* This function handles various cases where we need to continue
5031 waiting for the inferior. */
5032 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5033
5034 static void
5035 keep_going (struct execution_control_state *ecs)
5036 {
5037 /* Make sure normal_stop is called if we get a QUIT handled before
5038 reaching resume. */
5039 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5040
5041 /* Save the pc before execution, to compare with pc after stop. */
5042 ecs->event_thread->prev_pc
5043 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5044
5045 /* If we did not do break;, it means we should keep running the
5046 inferior and not return to the debugger. */
5047
5048 if (ecs->event_thread->trap_expected
5049 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5050 {
5051 /* We took a signal (which we are supposed to pass through to
5052 the inferior, else we'd not get here) and we haven't yet
5053 gotten our trap. Simply continue. */
5054
5055 discard_cleanups (old_cleanups);
5056 resume (currently_stepping (ecs->event_thread),
5057 ecs->event_thread->stop_signal);
5058 }
5059 else
5060 {
5061 /* Either the trap was not expected, but we are continuing
5062 anyway (the user asked that this signal be passed to the
5063 child)
5064 -- or --
5065 The signal was SIGTRAP, e.g. it was our signal, but we
5066 decided we should resume from it.
5067
5068 We're going to run this baby now!
5069
5070 Note that insert_breakpoints won't try to re-insert
5071 already inserted breakpoints. Therefore, we don't
5072 care if breakpoints were already inserted, or not. */
5073
5074 if (ecs->event_thread->stepping_over_breakpoint)
5075 {
5076 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5077 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5078 /* Since we can't do a displaced step, we have to remove
5079 the breakpoint while we step it. To keep things
5080 simple, we remove them all. */
5081 remove_breakpoints ();
5082 }
5083 else
5084 {
5085 struct gdb_exception e;
5086 /* Stop stepping when inserting breakpoints
5087 has failed. */
5088 TRY_CATCH (e, RETURN_MASK_ERROR)
5089 {
5090 insert_breakpoints ();
5091 }
5092 if (e.reason < 0)
5093 {
5094 exception_print (gdb_stderr, e);
5095 stop_stepping (ecs);
5096 return;
5097 }
5098 }
5099
5100 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5101
5102 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5103 specifies that such a signal should be delivered to the
5104 target program).
5105
5106 Typically, this would occur when a user is debugging a
5107 target monitor on a simulator: the target monitor sets a
5108 breakpoint; the simulator encounters this break-point and
5109 halts the simulation, handing control to GDB; GDB, noting
5110 that the break-point isn't valid, returns control back to the
5111 simulator; the simulator then delivers the hardware
5112 equivalent of a SIGNAL_TRAP to the program being debugged. */
5113
5114 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5115 && !signal_program[ecs->event_thread->stop_signal])
5116 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5117
5118 discard_cleanups (old_cleanups);
5119 resume (currently_stepping (ecs->event_thread),
5120 ecs->event_thread->stop_signal);
5121 }
5122
5123 prepare_to_wait (ecs);
5124 }
5125
5126 /* This function normally comes after a resume, before
5127 handle_inferior_event exits. It takes care of any last bits of
5128 housekeeping, and sets the all-important wait_some_more flag. */
5129
5130 static void
5131 prepare_to_wait (struct execution_control_state *ecs)
5132 {
5133 if (debug_infrun)
5134 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5135
5136 /* This is the old end of the while loop. Let everybody know we
5137 want to wait for the inferior some more and get called again
5138 soon. */
5139 ecs->wait_some_more = 1;
5140 }
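/* A simplified sketch of how the wait_some_more flag set above (and
   cleared by stop_stepping) is consumed by the caller: events keep
   being fetched and handled while the flag remains set.  The real
   loop in wait_for_inferior carries more state than this.  */
#if 0
  do
    {
      ecs->ptid = target_wait (minus_one_ptid, &ecs->ws, 0);
      handle_inferior_event (ecs);
    }
  while (ecs->wait_some_more);
#endif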
5141
5142 /* Print why the inferior has stopped. We always print something when
5143 the inferior exits, or receives a signal. The rest of the cases are
5144 dealt with later on in normal_stop() and print_it_typical(). Ideally
5145 there should be a call to this function from handle_inferior_event()
5146 each time stop_stepping() is called. */
5147 static void
5148 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5149 {
5150 switch (stop_reason)
5151 {
5152 case END_STEPPING_RANGE:
5153 /* We are done with a step/next/si/ni command. */
5154 /* For now print nothing. */
5155 /* Print a message only if not in the middle of doing a "step n"
5156 operation for n > 1. */
5157 if (!inferior_thread ()->step_multi
5158 || !inferior_thread ()->stop_step)
5159 if (ui_out_is_mi_like_p (uiout))
5160 ui_out_field_string
5161 (uiout, "reason",
5162 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5163 break;
5164 case SIGNAL_EXITED:
5165 /* The inferior was terminated by a signal. */
5166 annotate_signalled ();
5167 if (ui_out_is_mi_like_p (uiout))
5168 ui_out_field_string
5169 (uiout, "reason",
5170 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5171 ui_out_text (uiout, "\nProgram terminated with signal ");
5172 annotate_signal_name ();
5173 ui_out_field_string (uiout, "signal-name",
5174 target_signal_to_name (stop_info));
5175 annotate_signal_name_end ();
5176 ui_out_text (uiout, ", ");
5177 annotate_signal_string ();
5178 ui_out_field_string (uiout, "signal-meaning",
5179 target_signal_to_string (stop_info));
5180 annotate_signal_string_end ();
5181 ui_out_text (uiout, ".\n");
5182 ui_out_text (uiout, "The program no longer exists.\n");
5183 break;
5184 case EXITED:
5185 /* The inferior program is finished. */
5186 annotate_exited (stop_info);
5187 if (stop_info)
5188 {
5189 if (ui_out_is_mi_like_p (uiout))
5190 ui_out_field_string (uiout, "reason",
5191 async_reason_lookup (EXEC_ASYNC_EXITED));
5192 ui_out_text (uiout, "\nProgram exited with code ");
5193 ui_out_field_fmt (uiout, "exit-code", "0%o",
5194 (unsigned int) stop_info);
5195 ui_out_text (uiout, ".\n");
5196 }
5197 else
5198 {
5199 if (ui_out_is_mi_like_p (uiout))
5200 ui_out_field_string
5201 (uiout, "reason",
5202 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5203 ui_out_text (uiout, "\nProgram exited normally.\n");
5204 }
5205 /* Support the --return-child-result option. */
5206 return_child_result_value = stop_info;
5207 break;
5208 case SIGNAL_RECEIVED:
5209 /* Signal received. The signal table tells us to print about
5210 it. */
5211 annotate_signal ();
5212
5213 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5214 {
5215 struct thread_info *t = inferior_thread ();
5216
5217 ui_out_text (uiout, "\n[");
5218 ui_out_field_string (uiout, "thread-name",
5219 target_pid_to_str (t->ptid));
5220 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5221 ui_out_text (uiout, " stopped");
5222 }
5223 else
5224 {
5225 ui_out_text (uiout, "\nProgram received signal ");
5226 annotate_signal_name ();
5227 if (ui_out_is_mi_like_p (uiout))
5228 ui_out_field_string
5229 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5230 ui_out_field_string (uiout, "signal-name",
5231 target_signal_to_name (stop_info));
5232 annotate_signal_name_end ();
5233 ui_out_text (uiout, ", ");
5234 annotate_signal_string ();
5235 ui_out_field_string (uiout, "signal-meaning",
5236 target_signal_to_string (stop_info));
5237 annotate_signal_string_end ();
5238 }
5239 ui_out_text (uiout, ".\n");
5240 break;
5241 case NO_HISTORY:
5242 /* Reverse execution: target ran out of history info. */
5243 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5244 break;
5245 default:
5246 internal_error (__FILE__, __LINE__,
5247 _("print_stop_reason: unrecognized enum value"));
5248 break;
5249 }
5250 }
5251 \f
5252
5253 /* Here to return control to GDB when the inferior stops for real.
5254 Print appropriate messages, remove breakpoints, give terminal our modes.
5255
5256 STOP_PRINT_FRAME nonzero means print the executing frame
5257 (pc, function, args, file, line number and line text).
5258 BREAKPOINTS_FAILED nonzero means stop was due to error
5259 attempting to insert breakpoints. */
5260
5261 void
5262 normal_stop (void)
5263 {
5264 struct target_waitstatus last;
5265 ptid_t last_ptid;
5266 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5267
5268 get_last_target_status (&last_ptid, &last);
5269
5270 /* If an exception is thrown from this point on, make sure to
5271 propagate GDB's knowledge of the executing state to the
5272 frontend/user running state. A QUIT is an easy exception to see
5273 here, so do this before any filtered output. */
5274 if (!non_stop)
5275 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5276 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5277 && last.kind != TARGET_WAITKIND_EXITED)
5278 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5279
5280 /* In non-stop mode, we don't want GDB to switch threads behind the
5281 user's back, to avoid races where the user is typing a command to
5282 apply to thread x, but GDB switches to thread y before the user
5283 finishes entering the command. */
5284
5285 /* As with the notification of thread events, we want to delay
5286 notifying the user that we've switched thread context until
5287 the inferior actually stops.
5288
5289 There's no point in saying anything if the inferior has exited.
5290 Note that SIGNALLED here means "exited with a signal", not
5291 "received a signal". */
5292 if (!non_stop
5293 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5294 && target_has_execution
5295 && last.kind != TARGET_WAITKIND_SIGNALLED
5296 && last.kind != TARGET_WAITKIND_EXITED)
5297 {
5298 target_terminal_ours_for_output ();
5299 printf_filtered (_("[Switching to %s]\n"),
5300 target_pid_to_str (inferior_ptid));
5301 annotate_thread_changed ();
5302 previous_inferior_ptid = inferior_ptid;
5303 }
5304
5305 if (!breakpoints_always_inserted_mode () && target_has_execution)
5306 {
5307 if (remove_breakpoints ())
5308 {
5309 target_terminal_ours_for_output ();
5310 printf_filtered (_("\
5311 Cannot remove breakpoints because program is no longer writable.\n\
5312 Further execution is probably impossible.\n"));
5313 }
5314 }
5315
5316 /* If an auto-display called a function and that got a signal,
5317 delete that auto-display to avoid an infinite recursion. */
5318
5319 if (stopped_by_random_signal)
5320 disable_current_display ();
5321
5322 /* Don't print a message if in the middle of doing a "step n"
5323 operation for n > 1. */
5324 if (target_has_execution
5325 && last.kind != TARGET_WAITKIND_SIGNALLED
5326 && last.kind != TARGET_WAITKIND_EXITED
5327 && inferior_thread ()->step_multi
5328 && inferior_thread ()->stop_step)
5329 goto done;
5330
5331 target_terminal_ours ();
5332
5333 /* Set the current source location. This will also happen if we
5334 display the frame below, but the current SAL will be incorrect
5335 during a user hook-stop function. */
5336 if (has_stack_frames () && !stop_stack_dummy)
5337 set_current_sal_from_frame (get_current_frame (), 1);
5338
5339 /* Let the user/frontend see the threads as stopped. */
5340 do_cleanups (old_chain);
5341
5342 /* Look up the hook_stop and run it (CLI internally handles problem
5343 of stop_command's pre-hook not existing). */
5344 if (stop_command)
5345 catch_errors (hook_stop_stub, stop_command,
5346 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5347
5348 if (!has_stack_frames ())
5349 goto done;
5350
5351 if (last.kind == TARGET_WAITKIND_SIGNALLED
5352 || last.kind == TARGET_WAITKIND_EXITED)
5353 goto done;
5354
5355 /* Select innermost stack frame - i.e., current frame is frame 0,
5356 and current location is based on that.
5357 Don't do this on return from a stack dummy routine,
5358 or if the program has exited. */
5359
5360 if (!stop_stack_dummy)
5361 {
5362 select_frame (get_current_frame ());
5363
5364 /* Print current location without a level number, if
5365 we have changed functions or hit a breakpoint.
5366 Print source line if we have one.
5367 bpstat_print() contains the logic deciding in detail
5368 what to print, based on the event(s) that just occurred. */
5369
5370 /* If --batch-silent is enabled, then there's no need to print the
5371 current source location, and trying to do so risks causing an error
5372 message about missing source files. */
5373 if (stop_print_frame && !batch_silent)
5374 {
5375 int bpstat_ret;
5376 int source_flag;
5377 int do_frame_printing = 1;
5378 struct thread_info *tp = inferior_thread ();
5379
5380 bpstat_ret = bpstat_print (tp->stop_bpstat);
5381 switch (bpstat_ret)
5382 {
5383 case PRINT_UNKNOWN:
5384 /* If we had hit a shared library event breakpoint,
5385 bpstat_print would print out this message. If we hit
5386 an OS-level shared library event, do the same
5387 thing. */
5388 if (last.kind == TARGET_WAITKIND_LOADED)
5389 {
5390 printf_filtered (_("Stopped due to shared library event\n"));
5391 source_flag = SRC_LINE; /* something bogus */
5392 do_frame_printing = 0;
5393 break;
5394 }
5395
5396 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5397 (or should) carry around the function and does (or
5398 should) use that when doing a frame comparison. */
5399 if (tp->stop_step
5400 && frame_id_eq (tp->step_frame_id,
5401 get_frame_id (get_current_frame ()))
5402 && step_start_function == find_pc_function (stop_pc))
5403 source_flag = SRC_LINE; /* finished step, just print source line */
5404 else
5405 source_flag = SRC_AND_LOC; /* print location and source line */
5406 break;
5407 case PRINT_SRC_AND_LOC:
5408 source_flag = SRC_AND_LOC; /* print location and source line */
5409 break;
5410 case PRINT_SRC_ONLY:
5411 source_flag = SRC_LINE;
5412 break;
5413 case PRINT_NOTHING:
5414 source_flag = SRC_LINE; /* something bogus */
5415 do_frame_printing = 0;
5416 break;
5417 default:
5418 internal_error (__FILE__, __LINE__, _("Unknown value."));
5419 }
5420
5421 /* The behavior of this routine with respect to the source
5422 flag is:
5423 SRC_LINE: Print only source line
5424 LOCATION: Print only location
5425 SRC_AND_LOC: Print location and source line */
5426 if (do_frame_printing)
5427 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5428
5429 /* Display the auto-display expressions. */
5430 do_displays ();
5431 }
5432 }
5433
5434 /* Save the function value return registers, if we care.
5435 We might be about to restore their previous contents. */
5436 if (inferior_thread ()->proceed_to_finish)
5437 {
5438 /* This should not be necessary. */
5439 if (stop_registers)
5440 regcache_xfree (stop_registers);
5441
5442 /* NB: The copy goes through to the target picking up the value of
5443 all the registers. */
5444 stop_registers = regcache_dup (get_current_regcache ());
5445 }
5446
5447 if (stop_stack_dummy == STOP_STACK_DUMMY)
5448 {
5449 /* Pop the empty frame that contains the stack dummy.
5450 This also restores inferior state prior to the call
5451 (struct inferior_thread_state). */
5452 struct frame_info *frame = get_current_frame ();
5453 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5454 frame_pop (frame);
5455 /* frame_pop() calls reinit_frame_cache as the last thing it does
5456 which means there's currently no selected frame. We don't need
5457 to re-establish a selected frame if the dummy call returns normally,
5458 that will be done by restore_inferior_status. However, we do have
5459 to handle the case where the dummy call is returning after being
5460 stopped (e.g. the dummy call previously hit a breakpoint). We
5461 can't know which case we have so just always re-establish a
5462 selected frame here. */
5463 select_frame (get_current_frame ());
5464 }
5465
5466 done:
5467 annotate_stopped ();
5468
5469 /* Suppress the stop observer if we're in the middle of:
5470
5471 - a step n (n > 1), as there are still more steps to be done.
5472
5473 - a "finish" command, as the observer will be called in
5474 finish_command_continuation, so it can include the inferior
5475 function's return value.
5476
5477 - calling an inferior function, as we pretend the inferior didn't
5478 run at all. The return value of the call is handled by the
5479 expression evaluator, through call_function_by_hand. */
5480
5481 if (!target_has_execution
5482 || last.kind == TARGET_WAITKIND_SIGNALLED
5483 || last.kind == TARGET_WAITKIND_EXITED
5484 || (!inferior_thread ()->step_multi
5485 && !(inferior_thread ()->stop_bpstat
5486 && inferior_thread ()->proceed_to_finish)
5487 && !inferior_thread ()->in_infcall))
5488 {
5489 if (!ptid_equal (inferior_ptid, null_ptid))
5490 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5491 stop_print_frame);
5492 else
5493 observer_notify_normal_stop (NULL, stop_print_frame);
5494 }
5495
5496 if (target_has_execution)
5497 {
5498 if (last.kind != TARGET_WAITKIND_SIGNALLED
5499 && last.kind != TARGET_WAITKIND_EXITED)
5500 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5501 Delete any breakpoint that is to be deleted at the next stop. */
5502 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5503 }
5504
5505 /* Try to get rid of automatically added inferiors that are no
5506 longer needed. Keeping those around slows down things linearly.
5507 Note that this never removes the current inferior. */
5508 prune_inferiors ();
5509 }
5510
5511 static int
5512 hook_stop_stub (void *cmd)
5513 {
5514 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5515 return (0);
5516 }
5517 \f
5518 int
5519 signal_stop_state (int signo)
5520 {
5521 return signal_stop[signo];
5522 }
5523
5524 int
5525 signal_print_state (int signo)
5526 {
5527 return signal_print[signo];
5528 }
5529
5530 int
5531 signal_pass_state (int signo)
5532 {
5533 return signal_program[signo];
5534 }
5535
5536 int
5537 signal_stop_update (int signo, int state)
5538 {
5539 int ret = signal_stop[signo];
5540 signal_stop[signo] = state;
5541 return ret;
5542 }
5543
5544 int
5545 signal_print_update (int signo, int state)
5546 {
5547 int ret = signal_print[signo];
5548 signal_print[signo] = state;
5549 return ret;
5550 }
5551
5552 int
5553 signal_pass_update (int signo, int state)
5554 {
5555 int ret = signal_program[signo];
5556 signal_program[signo] = state;
5557 return ret;
5558 }
5559
5560 static void
5561 sig_print_header (void)
5562 {
5563 printf_filtered (_("\
5564 Signal Stop\tPrint\tPass to program\tDescription\n"));
5565 }
5566
5567 static void
5568 sig_print_info (enum target_signal oursig)
5569 {
5570 const char *name = target_signal_to_name (oursig);
5571 int name_padding = 13 - strlen (name);
5572
5573 if (name_padding <= 0)
5574 name_padding = 0;
5575
5576 printf_filtered ("%s", name);
5577 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5578 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5579 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5580 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5581 printf_filtered ("%s\n", target_signal_to_string (oursig));
5582 }
5583
5584 /* Specify how various signals in the inferior should be handled. */
5585
5586 static void
5587 handle_command (char *args, int from_tty)
5588 {
5589 char **argv;
5590 int digits, wordlen;
5591 int sigfirst, signum, siglast;
5592 enum target_signal oursig;
5593 int allsigs;
5594 int nsigs;
5595 unsigned char *sigs;
5596 struct cleanup *old_chain;
5597
5598 if (args == NULL)
5599 {
5600 error_no_arg (_("signal to handle"));
5601 }
5602
5603 /* Allocate and zero an array of flags for which signals to handle. */
5604
5605 nsigs = (int) TARGET_SIGNAL_LAST;
5606 sigs = (unsigned char *) alloca (nsigs);
5607 memset (sigs, 0, nsigs);
5608
5609 /* Break the command line up into args. */
5610
5611 argv = gdb_buildargv (args);
5612 old_chain = make_cleanup_freeargv (argv);
5613
5614 /* Walk through the args, looking for signal oursigs, signal names, and
5615 actions. Signal numbers and signal names may be interspersed with
5616 actions, with the actions being performed for all signals cumulatively
5617 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5618
5619 while (*argv != NULL)
5620 {
5621 wordlen = strlen (*argv);
5622 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5623 {;
5624 }
5625 allsigs = 0;
5626 sigfirst = siglast = -1;
5627
5628 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5629 {
5630 /* Apply action to all signals except those used by the
5631 debugger. Silently skip those. */
5632 allsigs = 1;
5633 sigfirst = 0;
5634 siglast = nsigs - 1;
5635 }
5636 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5637 {
5638 SET_SIGS (nsigs, sigs, signal_stop);
5639 SET_SIGS (nsigs, sigs, signal_print);
5640 }
5641 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5642 {
5643 UNSET_SIGS (nsigs, sigs, signal_program);
5644 }
5645 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5646 {
5647 SET_SIGS (nsigs, sigs, signal_print);
5648 }
5649 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5650 {
5651 SET_SIGS (nsigs, sigs, signal_program);
5652 }
5653 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5654 {
5655 UNSET_SIGS (nsigs, sigs, signal_stop);
5656 }
5657 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5658 {
5659 SET_SIGS (nsigs, sigs, signal_program);
5660 }
5661 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5662 {
5663 UNSET_SIGS (nsigs, sigs, signal_print);
5664 UNSET_SIGS (nsigs, sigs, signal_stop);
5665 }
5666 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5667 {
5668 UNSET_SIGS (nsigs, sigs, signal_program);
5669 }
5670 else if (digits > 0)
5671 {
5672 /* It is numeric. The numeric signal refers to our own
5673 internal signal numbering from target.h, not to host/target
5674 signal number. This is a feature; users really should be
5675 using symbolic names anyway, and the common ones like
5676 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5677
5678 sigfirst = siglast = (int)
5679 target_signal_from_command (atoi (*argv));
5680 if ((*argv)[digits] == '-')
5681 {
5682 siglast = (int)
5683 target_signal_from_command (atoi ((*argv) + digits + 1));
5684 }
5685 if (sigfirst > siglast)
5686 {
5687 /* Bet he didn't figure we'd think of this case... */
5688 signum = sigfirst;
5689 sigfirst = siglast;
5690 siglast = signum;
5691 }
5692 }
5693 else
5694 {
5695 oursig = target_signal_from_name (*argv);
5696 if (oursig != TARGET_SIGNAL_UNKNOWN)
5697 {
5698 sigfirst = siglast = (int) oursig;
5699 }
5700 else
5701 {
5702 /* Not a number and not a recognized flag word => complain. */
5703 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5704 }
5705 }
5706
5707 /* If any signal numbers or symbol names were found, set flags for
5708 which signals to apply actions to. */
5709
5710 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5711 {
5712 switch ((enum target_signal) signum)
5713 {
5714 case TARGET_SIGNAL_TRAP:
5715 case TARGET_SIGNAL_INT:
5716 if (!allsigs && !sigs[signum])
5717 {
5718 if (query (_("%s is used by the debugger.\n\
5719 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5720 {
5721 sigs[signum] = 1;
5722 }
5723 else
5724 {
5725 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5726 gdb_flush (gdb_stdout);
5727 }
5728 }
5729 break;
5730 case TARGET_SIGNAL_0:
5731 case TARGET_SIGNAL_DEFAULT:
5732 case TARGET_SIGNAL_UNKNOWN:
5733 /* Make sure that "all" doesn't print these. */
5734 break;
5735 default:
5736 sigs[signum] = 1;
5737 break;
5738 }
5739 }
5740
5741 argv++;
5742 }
5743
5744 for (signum = 0; signum < nsigs; signum++)
5745 if (sigs[signum])
5746 {
5747 target_notice_signals (inferior_ptid);
5748
5749 if (from_tty)
5750 {
5751 /* Show the results. */
5752 sig_print_header ();
5753 for (; signum < nsigs; signum++)
5754 if (sigs[signum])
5755 sig_print_info (signum);
5756 }
5757
5758 break;
5759 }
5760
5761 do_cleanups (old_chain);
5762 }
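/* An illustrative sketch of argument strings the parser above
   accepts.  Signal names, internal signal numbers and LOW-HIGH
   ranges may be mixed with actions, and each action applies to all
   signals specified so far on the line.  */
#if 0
  handle_command ("SIGUSR1 nostop noprint pass", 0); /* one signal */
  handle_command ("14-15 pass", 0);                  /* a numeric range */
  handle_command ("all print", 0);                   /* all except those
                                                        the debugger uses */
#endif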
5763
5764 static void
5765 xdb_handle_command (char *args, int from_tty)
5766 {
5767 char **argv;
5768 struct cleanup *old_chain;
5769
5770 if (args == NULL)
5771 error_no_arg (_("xdb command"));
5772
5773 /* Break the command line up into args. */
5774
5775 argv = gdb_buildargv (args);
5776 old_chain = make_cleanup_freeargv (argv);
5777 if (argv[1] != (char *) NULL)
5778 {
5779 char *argBuf;
5780 int bufLen;
5781
5782 bufLen = strlen (argv[0]) + 20;
5783 argBuf = (char *) xmalloc (bufLen);
5784 if (argBuf)
5785 {
5786 int validFlag = 1;
5787 enum target_signal oursig;
5788
5789 oursig = target_signal_from_name (argv[0]);
5790 memset (argBuf, 0, bufLen);
5791 if (strcmp (argv[1], "Q") == 0)
5792 sprintf (argBuf, "%s %s", argv[0], "noprint");
5793 else
5794 {
5795 if (strcmp (argv[1], "s") == 0)
5796 {
5797 if (!signal_stop[oursig])
5798 sprintf (argBuf, "%s %s", argv[0], "stop");
5799 else
5800 sprintf (argBuf, "%s %s", argv[0], "nostop");
5801 }
5802 else if (strcmp (argv[1], "i") == 0)
5803 {
5804 if (!signal_program[oursig])
5805 sprintf (argBuf, "%s %s", argv[0], "pass");
5806 else
5807 sprintf (argBuf, "%s %s", argv[0], "nopass");
5808 }
5809 else if (strcmp (argv[1], "r") == 0)
5810 {
5811 if (!signal_print[oursig])
5812 sprintf (argBuf, "%s %s", argv[0], "print");
5813 else
5814 sprintf (argBuf, "%s %s", argv[0], "noprint");
5815 }
5816 else
5817 validFlag = 0;
5818 }
5819 if (validFlag)
5820 handle_command (argBuf, from_tty);
5821 else
5822 printf_filtered (_("Invalid signal handling flag.\n"));
5823 if (argBuf)
5824 xfree (argBuf);
5825 }
5826 }
5827 do_cleanups (old_chain);
5828 }
5829
5830 /* Print current contents of the tables set by the handle command.
5831 It is possible we should just be printing signals actually used
5832 by the current target (but for things to work right when switching
5833 targets, all signals should be in the signal tables). */
5834
5835 static void
5836 signals_info (char *signum_exp, int from_tty)
5837 {
5838 enum target_signal oursig;
5839 sig_print_header ();
5840
5841 if (signum_exp)
5842 {
5843 /* First see if this is a symbol name. */
5844 oursig = target_signal_from_name (signum_exp);
5845 if (oursig == TARGET_SIGNAL_UNKNOWN)
5846 {
5847 /* No, try numeric. */
5848 oursig =
5849 target_signal_from_command (parse_and_eval_long (signum_exp));
5850 }
5851 sig_print_info (oursig);
5852 return;
5853 }
5854
5855 printf_filtered ("\n");
5856 /* These ugly casts brought to you by the native VAX compiler. */
5857 for (oursig = TARGET_SIGNAL_FIRST;
5858 (int) oursig < (int) TARGET_SIGNAL_LAST;
5859 oursig = (enum target_signal) ((int) oursig + 1))
5860 {
5861 QUIT;
5862
5863 if (oursig != TARGET_SIGNAL_UNKNOWN
5864 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5865 sig_print_info (oursig);
5866 }
5867
5868 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5869 }
5870
5871 /* The $_siginfo convenience variable is a bit special. We don't know
5872 for sure the type of the value until we actually have a chance to
5873 fetch the data. The type can change depending on gdbarch, so it is
5874 also dependent on which thread you have selected. This is handled by:
5875
5876 1. making $_siginfo be an internalvar that creates a new value on
5877 access.
5878
5879 2. making the value of $_siginfo be an lval_computed value. */
5880
5881 /* This function implements the lval_computed support for reading a
5882 $_siginfo value. */
5883
5884 static void
5885 siginfo_value_read (struct value *v)
5886 {
5887 LONGEST transferred;
5888
5889 transferred =
5890 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5891 NULL,
5892 value_contents_all_raw (v),
5893 value_offset (v),
5894 TYPE_LENGTH (value_type (v)));
5895
5896 if (transferred != TYPE_LENGTH (value_type (v)))
5897 error (_("Unable to read siginfo"));
5898 }
5899
5900 /* This function implements the lval_computed support for writing a
5901 $_siginfo value. */
5902
5903 static void
5904 siginfo_value_write (struct value *v, struct value *fromval)
5905 {
5906 LONGEST transferred;
5907
5908 transferred = target_write (&current_target,
5909 TARGET_OBJECT_SIGNAL_INFO,
5910 NULL,
5911 value_contents_all_raw (fromval),
5912 value_offset (v),
5913 TYPE_LENGTH (value_type (fromval)));
5914
5915 if (transferred != TYPE_LENGTH (value_type (fromval)))
5916 error (_("Unable to write siginfo"));
5917 }
5918
5919 static struct lval_funcs siginfo_value_funcs =
5920 {
5921 siginfo_value_read,
5922 siginfo_value_write
5923 };
5924
5925 /* Return a new value with the correct type for the siginfo object of
5926 the current thread using architecture GDBARCH. Return a void value
5927 if there's no object available. */
5928
5929 static struct value *
5930 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5931 {
5932 if (target_has_stack
5933 && !ptid_equal (inferior_ptid, null_ptid)
5934 && gdbarch_get_siginfo_type_p (gdbarch))
5935 {
5936 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5937 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5938 }
5939
5940 return allocate_value (builtin_type (gdbarch)->builtin_void);
5941 }
5942
5943 \f
5944 /* Inferior thread state.
5945 These are details related to the inferior itself, and don't include
5946 things like what frame the user had selected or what gdb was doing
5947 with the target at the time.
5948 For inferior function calls these are things we want to restore
5949 regardless of whether the function call successfully completes
5950 or the dummy frame has to be manually popped. */
5951
5952 struct inferior_thread_state
5953 {
5954 enum target_signal stop_signal;
5955 CORE_ADDR stop_pc;
5956 struct regcache *registers;
5957 };
5958
5959 struct inferior_thread_state *
5960 save_inferior_thread_state (void)
5961 {
5962 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5963 struct thread_info *tp = inferior_thread ();
5964
5965 inf_state->stop_signal = tp->stop_signal;
5966 inf_state->stop_pc = stop_pc;
5967
5968 inf_state->registers = regcache_dup (get_current_regcache ());
5969
5970 return inf_state;
5971 }
5972
5973 /* Restore inferior session state to INF_STATE. */
5974
5975 void
5976 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5977 {
5978 struct thread_info *tp = inferior_thread ();
5979
5980 tp->stop_signal = inf_state->stop_signal;
5981 stop_pc = inf_state->stop_pc;
5982
5983 /* The inferior can be gone if the user types "print exit(0)"
5984 (and perhaps other times). */
5985 if (target_has_execution)
5986 /* NB: The register write goes through to the target. */
5987 regcache_cpy (get_current_regcache (), inf_state->registers);
5988 regcache_xfree (inf_state->registers);
5989 xfree (inf_state);
5990 }
5991
5992 static void
5993 do_restore_inferior_thread_state_cleanup (void *state)
5994 {
5995 restore_inferior_thread_state (state);
5996 }
5997
5998 struct cleanup *
5999 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6000 {
6001 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6002 }
6003
6004 void
6005 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6006 {
6007 regcache_xfree (inf_state->registers);
6008 xfree (inf_state);
6009 }
6010
6011 struct regcache *
6012 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6013 {
6014 return inf_state->registers;
6015 }
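/* An illustrative sketch of the save/restore pattern the functions
   above provide, roughly as an inferior function call would use
   them: the state is saved and a cleanup registered so that an error
   restores registers and stop state, while the success path discards
   the cleanup and frees the saved copy explicitly.  */
#if 0
  {
    struct inferior_thread_state *saved_state
      = save_inferior_thread_state ();
    struct cleanup *back_to
      = make_cleanup_restore_inferior_thread_state (saved_state);

    /* ... resume the inferior and wait for the call to complete ... */

    discard_cleanups (back_to);
    discard_inferior_thread_state (saved_state);
  }
#endif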
6016
6017 /* Session related state for inferior function calls.
6018 These are the additional bits of state that need to be restored
6019 when an inferior function call successfully completes. */
6020
6021 struct inferior_status
6022 {
6023 bpstat stop_bpstat;
6024 int stop_step;
6025 enum stop_stack_kind stop_stack_dummy;
6026 int stopped_by_random_signal;
6027 int stepping_over_breakpoint;
6028 CORE_ADDR step_range_start;
6029 CORE_ADDR step_range_end;
6030 struct frame_id step_frame_id;
6031 struct frame_id step_stack_frame_id;
6032 enum step_over_calls_kind step_over_calls;
6033 CORE_ADDR step_resume_break_address;
6034 int stop_after_trap;
6035 int stop_soon;
6036
6037 /* ID of the selected frame when the inferior function call was made. */
6038 struct frame_id selected_frame_id;
6039
6040 int proceed_to_finish;
6041 int in_infcall;
6042 };
6043
6044 /* Save all of the information associated with the inferior<==>gdb
6045 connection. */
6046
6047 struct inferior_status *
6048 save_inferior_status (void)
6049 {
6050 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6051 struct thread_info *tp = inferior_thread ();
6052 struct inferior *inf = current_inferior ();
6053
6054 inf_status->stop_step = tp->stop_step;
6055 inf_status->stop_stack_dummy = stop_stack_dummy;
6056 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6057 inf_status->stepping_over_breakpoint = tp->trap_expected;
6058 inf_status->step_range_start = tp->step_range_start;
6059 inf_status->step_range_end = tp->step_range_end;
6060 inf_status->step_frame_id = tp->step_frame_id;
6061 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6062 inf_status->step_over_calls = tp->step_over_calls;
6063 inf_status->stop_after_trap = stop_after_trap;
6064 inf_status->stop_soon = inf->stop_soon;
6065 /* Save original bpstat chain here; replace it with copy of chain.
6066 If caller's caller is walking the chain, they'll be happier if we
6067 hand them back the original chain when restore_inferior_status is
6068 called. */
6069 inf_status->stop_bpstat = tp->stop_bpstat;
6070 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6071 inf_status->proceed_to_finish = tp->proceed_to_finish;
6072 inf_status->in_infcall = tp->in_infcall;
6073
6074 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6075
6076 return inf_status;
6077 }
6078
6079 static int
6080 restore_selected_frame (void *args)
6081 {
6082 struct frame_id *fid = (struct frame_id *) args;
6083 struct frame_info *frame;
6084
6085 frame = frame_find_by_id (*fid);
6086
6087 /* If inf_status->selected_frame_id is NULL, there was no previously
6088 selected frame. */
6089 if (frame == NULL)
6090 {
6091 warning (_("Unable to restore previously selected frame."));
6092 return 0;
6093 }
6094
6095 select_frame (frame);
6096
6097 return (1);
6098 }
6099
6100 /* Restore inferior session state to INF_STATUS. */
6101
6102 void
6103 restore_inferior_status (struct inferior_status *inf_status)
6104 {
6105 struct thread_info *tp = inferior_thread ();
6106 struct inferior *inf = current_inferior ();
6107
6108 tp->stop_step = inf_status->stop_step;
6109 stop_stack_dummy = inf_status->stop_stack_dummy;
6110 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6111 tp->trap_expected = inf_status->stepping_over_breakpoint;
6112 tp->step_range_start = inf_status->step_range_start;
6113 tp->step_range_end = inf_status->step_range_end;
6114 tp->step_frame_id = inf_status->step_frame_id;
6115 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6116 tp->step_over_calls = inf_status->step_over_calls;
6117 stop_after_trap = inf_status->stop_after_trap;
6118 inf->stop_soon = inf_status->stop_soon;
6119 bpstat_clear (&tp->stop_bpstat);
6120 tp->stop_bpstat = inf_status->stop_bpstat;
6121 inf_status->stop_bpstat = NULL;
6122 tp->proceed_to_finish = inf_status->proceed_to_finish;
6123 tp->in_infcall = inf_status->in_infcall;
6124
6125 if (target_has_stack)
6126 {
6127 /* The point of catch_errors is that if the stack is clobbered,
6128 walking the stack might encounter a garbage pointer and
6129 error() trying to dereference it. */
6130 if (catch_errors
6131 (restore_selected_frame, &inf_status->selected_frame_id,
6132 "Unable to restore previously selected frame:\n",
6133 RETURN_MASK_ERROR) == 0)
6134 /* Error in restoring the selected frame. Select the innermost
6135 frame. */
6136 select_frame (get_current_frame ());
6137 }
6138
6139 xfree (inf_status);
6140 }
6141
6142 static void
6143 do_restore_inferior_status_cleanup (void *sts)
6144 {
6145 restore_inferior_status (sts);
6146 }
6147
6148 struct cleanup *
6149 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6150 {
6151 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6152 }
6153
6154 void
6155 discard_inferior_status (struct inferior_status *inf_status)
6156 {
6157 /* See save_inferior_status for info on stop_bpstat. */
6158 bpstat_clear (&inf_status->stop_bpstat);
6159 xfree (inf_status);
6160 }
6161 \f
6162 int
6163 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6164 {
6165 struct target_waitstatus last;
6166 ptid_t last_ptid;
6167
6168 get_last_target_status (&last_ptid, &last);
6169
6170 if (last.kind != TARGET_WAITKIND_FORKED)
6171 return 0;
6172
6173 if (!ptid_equal (last_ptid, pid))
6174 return 0;
6175
6176 *child_pid = last.value.related_pid;
6177 return 1;
6178 }
6179
6180 int
6181 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6182 {
6183 struct target_waitstatus last;
6184 ptid_t last_ptid;
6185
6186 get_last_target_status (&last_ptid, &last);
6187
6188 if (last.kind != TARGET_WAITKIND_VFORKED)
6189 return 0;
6190
6191 if (!ptid_equal (last_ptid, pid))
6192 return 0;
6193
6194 *child_pid = last.value.related_pid;
6195 return 1;
6196 }
6197
6198 int
6199 inferior_has_execd (ptid_t pid, char **execd_pathname)
6200 {
6201 struct target_waitstatus last;
6202 ptid_t last_ptid;
6203
6204 get_last_target_status (&last_ptid, &last);
6205
6206 if (last.kind != TARGET_WAITKIND_EXECD)
6207 return 0;
6208
6209 if (!ptid_equal (last_ptid, pid))
6210 return 0;
6211
6212 *execd_pathname = xstrdup (last.value.execd_pathname);
6213 return 1;
6214 }
6215
6216 int
6217 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6218 {
6219 struct target_waitstatus last;
6220 ptid_t last_ptid;
6221
6222 get_last_target_status (&last_ptid, &last);
6223
6224 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6225 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6226 return 0;
6227
6228 if (!ptid_equal (last_ptid, pid))
6229 return 0;
6230
6231 *syscall_number = last.value.syscall_number;
6232 return 1;
6233 }
6234
6235 /* Oft used ptids */
6236 ptid_t null_ptid;
6237 ptid_t minus_one_ptid;
6238
6239 /* Create a ptid given the necessary PID, LWP, and TID components. */
6240
6241 ptid_t
6242 ptid_build (int pid, long lwp, long tid)
6243 {
6244 ptid_t ptid;
6245
6246 ptid.pid = pid;
6247 ptid.lwp = lwp;
6248 ptid.tid = tid;
6249 return ptid;
6250 }
6251
6252 /* Create a ptid from just a pid. */
6253
6254 ptid_t
6255 pid_to_ptid (int pid)
6256 {
6257 return ptid_build (pid, 0, 0);
6258 }
6259
6260 /* Fetch the pid (process id) component from a ptid. */
6261
6262 int
6263 ptid_get_pid (ptid_t ptid)
6264 {
6265 return ptid.pid;
6266 }
6267
6268 /* Fetch the lwp (lightweight process) component from a ptid. */
6269
6270 long
6271 ptid_get_lwp (ptid_t ptid)
6272 {
6273 return ptid.lwp;
6274 }
6275
6276 /* Fetch the tid (thread id) component from a ptid. */
6277
6278 long
6279 ptid_get_tid (ptid_t ptid)
6280 {
6281 return ptid.tid;
6282 }
6283
6284 /* ptid_equal() is used to test equality of two ptids. */
6285
6286 int
6287 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6288 {
6289 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6290 && ptid1.tid == ptid2.tid);
6291 }
6292
6293 /* Returns true if PTID represents a process. */
6294
6295 int
6296 ptid_is_pid (ptid_t ptid)
6297 {
6298 if (ptid_equal (minus_one_ptid, ptid))
6299 return 0;
6300 if (ptid_equal (null_ptid, ptid))
6301 return 0;
6302
6303 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6304 }
6305
6306 int
6307 ptid_match (ptid_t ptid, ptid_t filter)
6308 {
6309 /* Since both parameters have the same type, prevent easy mistakes
6310 from happening. */
6311 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6312 && !ptid_equal (ptid, null_ptid));
6313
6314 if (ptid_equal (filter, minus_one_ptid))
6315 return 1;
6316 if (ptid_is_pid (filter)
6317 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6318 return 1;
6319 else if (ptid_equal (ptid, filter))
6320 return 1;
6321
6322 return 0;
6323 }
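/* An illustrative sketch, with made-up pid/lwp values, of what
   ptid_match accepts: minus_one_ptid matches any thread, a bare
   process ptid matches every thread of that process, and anything
   else must compare equal.  */
#if 0
  {
    ptid_t thread = ptid_build (1234, 5678, 0);

    gdb_assert (ptid_match (thread, minus_one_ptid));
    gdb_assert (ptid_match (thread, pid_to_ptid (1234)));
    gdb_assert (!ptid_match (thread, pid_to_ptid (4321)));
    gdb_assert (ptid_match (thread, thread));
  }
#endif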
6324
6325 /* restore_inferior_ptid() will be used by the cleanup machinery
6326 to restore the inferior_ptid value saved in a call to
6327 save_inferior_ptid(). */
6328
6329 static void
6330 restore_inferior_ptid (void *arg)
6331 {
6332 ptid_t *saved_ptid_ptr = arg;
6333 inferior_ptid = *saved_ptid_ptr;
6334 xfree (arg);
6335 }
6336
6337 /* Save the value of inferior_ptid so that it may be restored by a
6338 later call to do_cleanups(). Returns the struct cleanup pointer
6339 needed for later doing the cleanup. */
6340
6341 struct cleanup *
6342 save_inferior_ptid (void)
6343 {
6344 ptid_t *saved_ptid_ptr;
6345
6346 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6347 *saved_ptid_ptr = inferior_ptid;
6348 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6349 }
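/* An illustrative sketch of the usual way the pair above is used:
   inferior_ptid is switched temporarily and the cleanup restores it
   even if an error is thrown in between.  OTHER_PTID is a
   hypothetical value, not something defined in this file.  */
#if 0
  {
    struct cleanup *old_chain = save_inferior_ptid ();

    inferior_ptid = other_ptid;
    /* ... operate with OTHER_PTID as the current thread ... */
    do_cleanups (old_chain);
  }
#endif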
6350 \f
6351
6352 /* User interface for reverse debugging:
6353 Set exec-direction / show exec-direction commands
6354 (returns error unless target implements to_set_exec_direction method). */
6355
6356 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6357 static const char exec_forward[] = "forward";
6358 static const char exec_reverse[] = "reverse";
6359 static const char *exec_direction = exec_forward;
6360 static const char *exec_direction_names[] = {
6361 exec_forward,
6362 exec_reverse,
6363 NULL
6364 };
6365
6366 static void
6367 set_exec_direction_func (char *args, int from_tty,
6368 struct cmd_list_element *cmd)
6369 {
6370 if (target_can_execute_reverse)
6371 {
6372 if (!strcmp (exec_direction, exec_forward))
6373 execution_direction = EXEC_FORWARD;
6374 else if (!strcmp (exec_direction, exec_reverse))
6375 execution_direction = EXEC_REVERSE;
6376 }
6377 }
6378
6379 static void
6380 show_exec_direction_func (struct ui_file *out, int from_tty,
6381 struct cmd_list_element *cmd, const char *value)
6382 {
6383 switch (execution_direction) {
6384 case EXEC_FORWARD:
6385 fprintf_filtered (out, _("Forward.\n"));
6386 break;
6387 case EXEC_REVERSE:
6388 fprintf_filtered (out, _("Reverse.\n"));
6389 break;
6390 case EXEC_ERROR:
6391 default:
6392 fprintf_filtered (out,
6393 _("Forward (target `%s' does not support exec-direction).\n"),
6394 target_shortname);
6395 break;
6396 }
6397 }
6398
6399 /* User interface for non-stop mode. */
6400
6401 int non_stop = 0;
6402 static int non_stop_1 = 0;
6403
6404 static void
6405 set_non_stop (char *args, int from_tty,
6406 struct cmd_list_element *c)
6407 {
6408 if (target_has_execution)
6409 {
6410 non_stop_1 = non_stop;
6411 error (_("Cannot change this setting while the inferior is running."));
6412 }
6413
6414 non_stop = non_stop_1;
6415 }
6416
6417 static void
6418 show_non_stop (struct ui_file *file, int from_tty,
6419 struct cmd_list_element *c, const char *value)
6420 {
6421 fprintf_filtered (file,
6422 _("Controlling the inferior in non-stop mode is %s.\n"),
6423 value);
6424 }
6425
6426 static void
6427 show_schedule_multiple (struct ui_file *file, int from_tty,
6428 struct cmd_list_element *c, const char *value)
6429 {
6430 fprintf_filtered (file, _("\
6431 Resuming the execution of threads of all processes is %s.\n"), value);
6432 }
6433
6434 void
6435 _initialize_infrun (void)
6436 {
6437 int i;
6438 int numsigs;
6439
6440 add_info ("signals", signals_info, _("\
6441 What debugger does when program gets various signals.\n\
6442 Specify a signal as argument to print info on that signal only."));
6443 add_info_alias ("handle", "signals", 0);
6444
6445 add_com ("handle", class_run, handle_command, _("\
6446 Specify how to handle a signal.\n\
6447 Args are signals and actions to apply to those signals.\n\
6448 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6449 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6450 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6451 The special arg \"all\" is recognized to mean all signals except those\n\
6452 used by the debugger, typically SIGTRAP and SIGINT.\n\
6453 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6454 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6455 Stop means reenter debugger if this signal happens (implies print).\n\
6456 Print means print a message if this signal happens.\n\
6457 Pass means let program see this signal; otherwise program doesn't know.\n\
6458 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6459 Pass and Stop may be combined."));
6460 if (xdb_commands)
6461 {
6462 add_com ("lz", class_info, signals_info, _("\
6463 What debugger does when program gets various signals.\n\
6464 Specify a signal as argument to print info on that signal only."));
6465 add_com ("z", class_run, xdb_handle_command, _("\
6466 Specify how to handle a signal.\n\
6467 Args are signals and actions to apply to those signals.\n\
6468 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6469 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6470 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6471 The special arg \"all\" is recognized to mean all signals except those\n\
6472 used by the debugger, typically SIGTRAP and SIGINT.\n\
6473 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6474 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6475 nopass), \"Q\" (noprint)\n\
6476 Stop means reenter debugger if this signal happens (implies print).\n\
6477 Print means print a message if this signal happens.\n\
6478 Pass means let program see this signal; otherwise program doesn't know.\n\
6479 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6480 Pass and Stop may be combined."));
6481 }
6482
6483 if (!dbx_commands)
6484 stop_command = add_cmd ("stop", class_obscure,
6485 not_just_help_class_command, _("\
6486 There is no `stop' command, but you can set a hook on `stop'.\n\
6487 This allows you to set a list of commands to be run each time execution\n\
6488 of the program stops."), &cmdlist);
6489
6490 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6491 Set inferior debugging."), _("\
6492 Show inferior debugging."), _("\
6493 When non-zero, inferior specific debugging is enabled."),
6494 NULL,
6495 show_debug_infrun,
6496 &setdebuglist, &showdebuglist);
6497
6498 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6499 Set displaced stepping debugging."), _("\
6500 Show displaced stepping debugging."), _("\
6501 When non-zero, displaced stepping specific debugging is enabled."),
6502 NULL,
6503 show_debug_displaced,
6504 &setdebuglist, &showdebuglist);
6505
6506 add_setshow_boolean_cmd ("non-stop", no_class,
6507 &non_stop_1, _("\
6508 Set whether gdb controls the inferior in non-stop mode."), _("\
6509 Show whether gdb controls the inferior in non-stop mode."), _("\
6510 When debugging a multi-threaded program and this setting is\n\
6511 off (the default, also called all-stop mode), when one thread stops\n\
6512 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6513 all other threads in the program while you interact with the thread of\n\
6514 interest. When you continue or step a thread, you can allow the other\n\
6515 threads to run, or have them remain stopped, but while you inspect any\n\
6516 thread's state, all threads stop.\n\
6517 \n\
6518 In non-stop mode, when one thread stops, other threads can continue\n\
6519 to run freely. You'll be able to step each thread independently,\n\
6520 leave it stopped or free to run as needed."),
6521 set_non_stop,
6522 show_non_stop,
6523 &setlist,
6524 &showlist);
6525
6526 numsigs = (int) TARGET_SIGNAL_LAST;
6527 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6528 signal_print = (unsigned char *)
6529 xmalloc (sizeof (signal_print[0]) * numsigs);
6530 signal_program = (unsigned char *)
6531 xmalloc (sizeof (signal_program[0]) * numsigs);
6532 for (i = 0; i < numsigs; i++)
6533 {
6534 signal_stop[i] = 1;
6535 signal_print[i] = 1;
6536 signal_program[i] = 1;
6537 }
6538
6539 /* Signals caused by debugger's own actions
6540 should not be given to the program afterwards. */
6541 signal_program[TARGET_SIGNAL_TRAP] = 0;
6542 signal_program[TARGET_SIGNAL_INT] = 0;
6543
6544 /* Signals that are not errors should not normally enter the debugger. */
6545 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6546 signal_print[TARGET_SIGNAL_ALRM] = 0;
6547 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6548 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6549 signal_stop[TARGET_SIGNAL_PROF] = 0;
6550 signal_print[TARGET_SIGNAL_PROF] = 0;
6551 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6552 signal_print[TARGET_SIGNAL_CHLD] = 0;
6553 signal_stop[TARGET_SIGNAL_IO] = 0;
6554 signal_print[TARGET_SIGNAL_IO] = 0;
6555 signal_stop[TARGET_SIGNAL_POLL] = 0;
6556 signal_print[TARGET_SIGNAL_POLL] = 0;
6557 signal_stop[TARGET_SIGNAL_URG] = 0;
6558 signal_print[TARGET_SIGNAL_URG] = 0;
6559 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6560 signal_print[TARGET_SIGNAL_WINCH] = 0;
6561
6562 /* These signals are used internally by user-level thread
6563 implementations. (See signal(5) on Solaris.) Like the above
6564 signals, a healthy program receives and handles them as part of
6565 its normal operation. */
6566 signal_stop[TARGET_SIGNAL_LWP] = 0;
6567 signal_print[TARGET_SIGNAL_LWP] = 0;
6568 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6569 signal_print[TARGET_SIGNAL_WAITING] = 0;
6570 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6571 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6572
6573 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6574 &stop_on_solib_events, _("\
6575 Set stopping for shared library events."), _("\
6576 Show stopping for shared library events."), _("\
6577 If nonzero, gdb will give control to the user when the dynamic linker\n\
6578 notifies gdb of shared library events. The most common event of interest\n\
6579 to the user would be loading/unloading of a new library."),
6580 NULL,
6581 show_stop_on_solib_events,
6582 &setlist, &showlist);
6583
6584 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6585 follow_fork_mode_kind_names,
6586 &follow_fork_mode_string, _("\
6587 Set debugger response to a program call of fork or vfork."), _("\
6588 Show debugger response to a program call of fork or vfork."), _("\
6589 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6590 parent - the original process is debugged after a fork\n\
6591 child - the new process is debugged after a fork\n\
6592 The unfollowed process will continue to run.\n\
6593 By default, the debugger will follow the parent process."),
6594 NULL,
6595 show_follow_fork_mode_string,
6596 &setlist, &showlist);
6597
6598 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6599 follow_exec_mode_names,
6600 &follow_exec_mode_string, _("\
6601 Set debugger response to a program call of exec."), _("\
6602 Show debugger response to a program call of exec."), _("\
6603 An exec call replaces the program image of a process.\n\
6604 \n\
6605 follow-exec-mode can be:\n\
6606 \n\
6607 new - the debugger creates a new inferior and rebinds the process \n\
6608 to this new inferior. The program the process was running before\n\
6609 the exec call can be restarted afterwards by restarting the original\n\
6610 inferior.\n\
6611 \n\
6612 same - the debugger keeps the process bound to the same inferior.\n\
6613 The new executable image replaces the previous executable loaded in\n\
6614 the inferior. Restarting the inferior after the exec call restarts\n\
6615 the executable the process was running after the exec call.\n\
6616 \n\
6617 By default, the debugger will use the same inferior."),
6618 NULL,
6619 show_follow_exec_mode_string,
6620 &setlist, &showlist);
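/* Example:

       (gdb) set follow-exec-mode new

   After an exec, gdb creates a fresh inferior for the new program
   image; "info inferiors" should then list the pre-exec program as a
   separate, restartable inferior.  */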
6621
6622 add_setshow_enum_cmd ("scheduler-locking", class_run,
6623 scheduler_enums, &scheduler_mode, _("\
6624 Set mode for locking scheduler during execution."), _("\
6625 Show mode for locking scheduler during execution."), _("\
6626 off == no locking (threads may preempt at any time)\n\
6627 on == full locking (no thread except the current thread may run)\n\
6628 step == scheduler locked during every single-step operation.\n\
6629 In this mode, no other thread may run during a step command.\n\
6630 Other threads may run while stepping over a function call ('next')."),
6631 set_schedlock_func, /* traps on target vector */
6632 show_scheduler_mode,
6633 &setlist, &showlist);
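/* Example:

       (gdb) set scheduler-locking step

   keeps other threads stopped while single-stepping, so a "step" is
   not interrupted by another thread hitting a breakpoint.  */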
6634
6635 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6636 Set mode for resuming threads of all processes."), _("\
6637 Show mode for resuming threads of all processes."), _("\
6638 When on, execution commands (such as 'continue' or 'next') resume all\n\
6639 threads of all processes. When off (which is the default), execution\n\
6640 commands only resume the threads of the current process. The set of\n\
6641 threads that are resumed is further refined by the scheduler-locking\n\
6642 mode (see help set scheduler-locking)."),
6643 NULL,
6644 show_schedule_multiple,
6645 &setlist, &showlist);
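/* Example, when more than one process is being debugged:

       (gdb) set schedule-multiple on
       (gdb) continue

   resumes the threads of all processes rather than only the current
   one, still subject to the scheduler-locking mode above.  */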
6646
6647 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6648 Set mode of the step operation."), _("\
6649 Show mode of the step operation."), _("\
6650 When set, a step over a function without debug line information\n\
6651 stops at the first instruction of that function. Otherwise, the\n\
6652 function is skipped and the step command stops at a different source line."),
6653 NULL,
6654 show_step_stop_if_no_debug,
6655 &setlist, &showlist);
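/* Example:

       (gdb) set step-mode on

   makes "step" stop at the first instruction of a function that has no
   line number information, instead of skipping over it.  */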
6656
6657 add_setshow_enum_cmd ("displaced-stepping", class_run,
6658 can_use_displaced_stepping_enum,
6659 &can_use_displaced_stepping, _("\
6660 Set debugger's willingness to use displaced stepping."), _("\
6661 Show debugger's willingness to use displaced stepping."), _("\
6662 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6663 supported by the target architecture. If off, gdb will not use displaced\n\
6664 stepping to step over breakpoints, even if it is supported by the target\n\
6665 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6666 if the target architecture supports it and non-stop mode is active, but will not\n\
6667 use it in all-stop mode (see help set non-stop)."),
6668 NULL,
6669 show_can_use_displaced_stepping,
6670 &setlist, &showlist);
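/* Example:

       (gdb) set displaced-stepping on

   tells gdb to step over breakpoints out of line whenever the
   architecture provides the required support, even in all-stop mode.  */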
6671
6672 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6673 &exec_direction, _("Set direction of execution.\n\
6674 Options are 'forward' or 'reverse'."),
6675 _("Show direction of execution (forward/reverse)."),
6676 _("Tells gdb whether to execute the program forward or in reverse."),
6677 set_exec_direction_func, show_exec_direction_func,
6678 &setlist, &showlist);
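/* Example (reverse execution needs target support, e.g. process
   record/replay):

       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) step
*/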
6679
6680 /* Set/show detach-on-fork: user-settable mode. */
6681
6682 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6683 Set whether gdb will detach the child of a fork."), _("\
6684 Show whether gdb will detach the child of a fork."), _("\
6685 Tells gdb whether to detach the child of a fork."),
6686 NULL, NULL, &setlist, &showlist);
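/* Example, usually combined with follow-fork-mode above:

       (gdb) set detach-on-fork off

   keeps the un-followed side of a fork under gdb's control instead of
   detaching it, so both processes remain available for debugging.  */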
6687
6688 /* ptid initializations */
6689 null_ptid = ptid_build (0, 0, 0);
6690 minus_one_ptid = ptid_build (-1, 0, 0);
6691 inferior_ptid = null_ptid;
6692 target_last_wait_ptid = minus_one_ptid;
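/* A ptid identifies a thread by up to three components: a process id,
   a kernel thread (lwp) id, and a user-level thread id, in that order.
   (0, 0, 0) is the "no process yet" value, while (-1, 0, 0) serves as
   a wildcard, e.g. when asking the target to report an event from any
   thread.  */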
6693
6694 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6695 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6696 observer_attach_thread_exit (infrun_thread_thread_exit);
6697 observer_attach_inferior_exit (infrun_inferior_exit);
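/* The registrations above subscribe infrun to notifications posted
   elsewhere in gdb, so the callbacks run whenever a thread's ptid
   changes, a stop is requested for a thread, a thread exits, or an
   inferior exits; this is how infrun keeps its per-thread bookkeeping
   consistent with those events.  */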
6698
6699 /* Explicitly create without lookup, since lookup would try to create a
6700 value with a void type, and when we get here, gdbarch
6701 isn't initialized yet. At this point, we're quite sure there
6702 isn't another convenience variable of the same name. */
6703 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6704 }