1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such a function. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 /* Default behavior is to detach newly forked processes (legacy). */
112 int detach_fork = 1;
113
114 int debug_displaced = 0;
115 static void
116 show_debug_displaced (struct ui_file *file, int from_tty,
117 struct cmd_list_element *c, const char *value)
118 {
119 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
120 }
121
122 static int debug_infrun = 0;
123 static void
124 show_debug_infrun (struct ui_file *file, int from_tty,
125 struct cmd_list_element *c, const char *value)
126 {
127 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
128 }
129
130 /* If the program uses ELF-style shared libraries, then calls to
131 functions in shared libraries go through stubs, which live in a
132 table called the PLT (Procedure Linkage Table). The first time the
133 function is called, the stub sends control to the dynamic linker,
134 which looks up the function's real address, patches the stub so
135 that future calls will go directly to the function, and then passes
136 control to the function.
137
138 If we are stepping at the source level, we don't want to see any of
139 this --- we just want to skip over the stub and the dynamic linker.
140 The simple approach is to single-step until control leaves the
141 dynamic linker.
142
143 However, on some systems (e.g., Red Hat's 5.2 distribution) the
144 dynamic linker calls functions in the shared C library, so you
145 can't tell from the PC alone whether the dynamic linker is still
146 running. In this case, we use a step-resume breakpoint to get us
147 past the dynamic linker, as if we were using "next" to step over a
148 function call.
149
150 in_solib_dynsym_resolve_code() says whether we're in the dynamic
151 linker code or not. Normally, this means we single-step. However,
152 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
153 address where we can place a step-resume breakpoint to get past the
154 linker's symbol resolution function.
155
156 in_solib_dynsym_resolve_code() can generally be implemented in a
157 pretty portable way, by comparing the PC against the address ranges
158 of the dynamic linker's sections.
159
160 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
161 it depends on internal details of the dynamic linker. It's usually
162 not too hard to figure out where to put a breakpoint, but it
163 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
164 sanity checking. If it can't figure things out, returning zero and
165 getting the (possibly confusing) stepping behavior is better than
166 signalling an error, which will obscure the change in the
167 inferior's state. */
168
169 /* This function returns TRUE if pc is the address of an instruction
170 that lies within the dynamic linker (such as the event hook, or the
171 dld itself).
172
173 This function must be used only when a dynamic linker event has
174 been caught, and the inferior is being stepped out of the hook, or
175 undefined results are guaranteed. */
176
177 #ifndef SOLIB_IN_DYNAMIC_LINKER
178 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
179 #endif
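
/* A minimal sketch, illustrative only, of how the two hooks described
   above might be consulted when a step lands in dynamic-linker code.
   The real logic lives in handle_inferior_event, and the helper
   plant_step_resume_breakpoint_at () is hypothetical here; GDB builds
   step-resume breakpoints via insert_step_resume_breakpoint_at_sal and
   friends.  If the resolver's return point is known we run to it,
   otherwise we keep single-stepping until control leaves the linker:

     static void
     example_step_through_solib_resolver (struct gdbarch *gdbarch,
                                          CORE_ADDR pc)
     {
       if (in_solib_dynsym_resolve_code (pc))
         {
           CORE_ADDR resolver_return
             = gdbarch_skip_solib_resolver (gdbarch, pc);

           if (resolver_return != 0)
             plant_step_resume_breakpoint_at (resolver_return);
         }
     }  */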
180
181
182 /* Tables of how to react to signals; the user sets them. */
183
184 static unsigned char *signal_stop;
185 static unsigned char *signal_print;
186 static unsigned char *signal_program;
187
188 #define SET_SIGS(nsigs,sigs,flags) \
189 do { \
190 int signum = (nsigs); \
191 while (signum-- > 0) \
192 if ((sigs)[signum]) \
193 (flags)[signum] = 1; \
194 } while (0)
195
196 #define UNSET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 0; \
202 } while (0)
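
/* Illustrative use of the macros above, assuming the TARGET_SIGNAL_*
   enumerators from enum target_signal and a caller-built mask much
   like the one handle_command constructs:

     unsigned char sigs[TARGET_SIGNAL_LAST] = { 0 };

     sigs[TARGET_SIGNAL_INT] = 1;
     SET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_stop);
     UNSET_SIGS (TARGET_SIGNAL_LAST, sigs, signal_program);

   Only entries whose slot in SIGS is non-zero are touched, so every
   other signal keeps its current stop/print/pass settings.  */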
203
204 /* Value to pass to target_resume() to cause all threads to resume */
205
206 #define RESUME_ALL minus_one_ptid
207
208 /* Command list pointer for the "stop" placeholder. */
209
210 static struct cmd_list_element *stop_command;
211
212 /* Function inferior was in as of last step command. */
213
214 static struct symbol *step_start_function;
215
216 /* Nonzero if we want to give control to the user when we're notified
217 of shared library events by the dynamic linker. */
218 static int stop_on_solib_events;
219 static void
220 show_stop_on_solib_events (struct ui_file *file, int from_tty,
221 struct cmd_list_element *c, const char *value)
222 {
223 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
224 value);
225 }
226
227 /* Nonzero means we are expecting a trace trap
228 and should stop the inferior and return silently when it happens. */
229
230 int stop_after_trap;
231
232 /* Save register contents here when executing a "finish" command or when
233 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
234 Thus this contains the return value from the called function (assuming
235 values are returned in a register). */
236
237 struct regcache *stop_registers;
238
239 /* Nonzero after stop if current stack frame should be printed. */
240
241 static int stop_print_frame;
242
243 /* This is a cached copy of the pid/waitstatus of the last event
244 returned by target_wait()/deprecated_target_wait_hook(). This
245 information is returned by get_last_target_status(). */
246 static ptid_t target_last_wait_ptid;
247 static struct target_waitstatus target_last_waitstatus;
248
249 static void context_switch (ptid_t ptid);
250
251 void init_thread_stepping_state (struct thread_info *tss);
252
253 void init_infwait_state (void);
254
255 static const char follow_fork_mode_child[] = "child";
256 static const char follow_fork_mode_parent[] = "parent";
257
258 static const char *follow_fork_mode_kind_names[] = {
259 follow_fork_mode_child,
260 follow_fork_mode_parent,
261 NULL
262 };
263
264 static const char *follow_fork_mode_string = follow_fork_mode_parent;
265 static void
266 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
267 struct cmd_list_element *c, const char *value)
268 {
269 fprintf_filtered (file, _("\
270 Debugger response to a program call of fork or vfork is \"%s\".\n"),
271 value);
272 }
273 \f
274
275 /* Tell the target to follow the fork we're stopped at. Returns true
276 if the inferior should be resumed; false if the target for some
277 reason decided it's best not to resume. */
278
279 static int
280 follow_fork (void)
281 {
282 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
283 int should_resume = 1;
284 struct thread_info *tp;
285
286 /* Copy user stepping state to the new inferior thread. FIXME: the
287 followed fork child thread should have a copy of most of the
288 parent thread structure's run control related fields, not just these.
289 Initialized to avoid "may be used uninitialized" warnings from gcc. */
290 struct breakpoint *step_resume_breakpoint = NULL;
291 CORE_ADDR step_range_start = 0;
292 CORE_ADDR step_range_end = 0;
293 struct frame_id step_frame_id = { 0 };
294
295 if (!non_stop)
296 {
297 ptid_t wait_ptid;
298 struct target_waitstatus wait_status;
299
300 /* Get the last target status returned by target_wait(). */
301 get_last_target_status (&wait_ptid, &wait_status);
302
303 /* If not stopped at a fork event, then there's nothing else to
304 do. */
305 if (wait_status.kind != TARGET_WAITKIND_FORKED
306 && wait_status.kind != TARGET_WAITKIND_VFORKED)
307 return 1;
308
309 /* Check if we switched over from WAIT_PTID, since the event was
310 reported. */
311 if (!ptid_equal (wait_ptid, minus_one_ptid)
312 && !ptid_equal (inferior_ptid, wait_ptid))
313 {
314 /* We did. Switch back to WAIT_PTID thread, to tell the
315 target to follow it (in either direction). We'll
316 afterwards refuse to resume, and inform the user what
317 happened. */
318 switch_to_thread (wait_ptid);
319 should_resume = 0;
320 }
321 }
322
323 tp = inferior_thread ();
324
325 /* If there were any forks/vforks that were caught and are now to be
326 followed, then do so now. */
327 switch (tp->pending_follow.kind)
328 {
329 case TARGET_WAITKIND_FORKED:
330 case TARGET_WAITKIND_VFORKED:
331 {
332 ptid_t parent, child;
333
334 /* If the user did a next/step, etc, over a fork call,
335 preserve the stepping state in the fork child. */
336 if (follow_child && should_resume)
337 {
338 step_resume_breakpoint
339 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
340 step_range_start = tp->step_range_start;
341 step_range_end = tp->step_range_end;
342 step_frame_id = tp->step_frame_id;
343
344 /* For now, delete the parent's sr breakpoint; otherwise,
345 parent/child sr breakpoints are considered duplicates,
346 and the child version will not be installed. Remove
347 this when the breakpoints module becomes aware of
348 inferiors and address spaces. */
349 delete_step_resume_breakpoint (tp);
350 tp->step_range_start = 0;
351 tp->step_range_end = 0;
352 tp->step_frame_id = null_frame_id;
353 }
354
355 parent = inferior_ptid;
356 child = tp->pending_follow.value.related_pid;
357
358 /* Tell the target to do whatever is necessary to follow
359 either parent or child. */
360 if (target_follow_fork (follow_child))
361 {
362 /* Target refused to follow, or there's some other reason
363 we shouldn't resume. */
364 should_resume = 0;
365 }
366 else
367 {
368 /* This pending follow fork event is now handled, one way
369 or another. The previously selected thread may be gone
370 from the lists by now, but if it is still around, we need
371 to clear the pending follow request. */
372 tp = find_thread_ptid (parent);
373 if (tp)
374 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
375
376 /* This makes sure we don't try to apply the "Switched
377 over from WAIT_PTID" logic above. */
378 nullify_last_target_wait_ptid ();
379
380 /* If we followed the child, switch to it... */
381 if (follow_child)
382 {
383 switch_to_thread (child);
384
385 /* ... and preserve the stepping state, in case the
386 user was stepping over the fork call. */
387 if (should_resume)
388 {
389 tp = inferior_thread ();
390 tp->step_resume_breakpoint = step_resume_breakpoint;
391 tp->step_range_start = step_range_start;
392 tp->step_range_end = step_range_end;
393 tp->step_frame_id = step_frame_id;
394 }
395 else
396 {
397 /* If we get here, it was because we're trying to
398 resume from a fork catchpoint, but the user
399 has switched threads away from the thread that
400 forked. In that case, the resume command
401 issued is most likely not applicable to the
402 child, so just warn, and refuse to resume. */
403 warning (_("\
404 Not resuming: switched threads before following fork child.\n"));
405 }
406
407 /* Reset breakpoints in the child as appropriate. */
408 follow_inferior_reset_breakpoints ();
409 }
410 else
411 switch_to_thread (parent);
412 }
413 }
414 break;
415 case TARGET_WAITKIND_SPURIOUS:
416 /* Nothing to follow. */
417 break;
418 default:
419 internal_error (__FILE__, __LINE__,
420 "Unexpected pending_follow.kind %d\n",
421 tp->pending_follow.kind);
422 break;
423 }
424
425 return should_resume;
426 }
427
428 void
429 follow_inferior_reset_breakpoints (void)
430 {
431 struct thread_info *tp = inferior_thread ();
432
433 /* Was there a step_resume breakpoint? (There was if the user
434 did a "next" at the fork() call.) If so, explicitly reset its
435 thread number.
436
437 step_resumes are a form of bp that are made to be per-thread.
438 Since we created the step_resume bp when the parent process
439 was being debugged, and now are switching to the child process,
440 from the breakpoint package's viewpoint, that's a switch of
441 "threads". We must update the bp's notion of which thread
442 it is for, or it'll be ignored when it triggers. */
443
444 if (tp->step_resume_breakpoint)
445 breakpoint_re_set_thread (tp->step_resume_breakpoint);
446
447 /* Reinsert all breakpoints in the child. The user may have set
448 breakpoints after catching the fork, in which case those
449 were never set in the child, but only in the parent. This makes
450 sure the inserted breakpoints match the breakpoint list. */
451
452 breakpoint_re_set ();
453 insert_breakpoints ();
454 }
455
456 /* The child has exited or execed: resume threads of the parent the
457 user wanted to be executing. */
458
459 static int
460 proceed_after_vfork_done (struct thread_info *thread,
461 void *arg)
462 {
463 int pid = * (int *) arg;
464
465 if (ptid_get_pid (thread->ptid) == pid
466 && is_running (thread->ptid)
467 && !is_executing (thread->ptid)
468 && !thread->stop_requested
469 && thread->stop_signal == TARGET_SIGNAL_0)
470 {
471 if (debug_infrun)
472 fprintf_unfiltered (gdb_stdlog,
473 "infrun: resuming vfork parent thread %s\n",
474 target_pid_to_str (thread->ptid));
475
476 switch_to_thread (thread->ptid);
477 clear_proceed_status ();
478 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
479 }
480
481 return 0;
482 }
483
484 /* Called whenever we notice an exec or exit event, to handle
485 detaching or resuming a vfork parent. */
486
487 static void
488 handle_vfork_child_exec_or_exit (int exec)
489 {
490 struct inferior *inf = current_inferior ();
491
492 if (inf->vfork_parent)
493 {
494 int resume_parent = -1;
495
496 /* This exec or exit marks the end of the shared memory region
497 between the parent and the child. If the user wanted to
498 detach from the parent, now is the time. */
499
500 if (inf->vfork_parent->pending_detach)
501 {
502 struct thread_info *tp;
503 struct cleanup *old_chain;
504 struct program_space *pspace;
505 struct address_space *aspace;
506
507 /* follow-fork child, detach-on-fork on */
508
509 old_chain = make_cleanup_restore_current_thread ();
510
511 /* We're letting go of the parent. */
512 tp = any_live_thread_of_process (inf->vfork_parent->pid);
513 switch_to_thread (tp->ptid);
514
515 /* We're about to detach from the parent, which implicitly
516 removes breakpoints from its address space. There's a
517 catch here: we want to reuse the spaces for the child,
518 but, parent/child are still sharing the pspace at this
519 point, although the exec in reality makes the kernel give
520 the child a fresh set of new pages. The problem here is
521 that the breakpoints module, being unaware of this, would
522 likely choose the child process to write to the parent
523 address space. Swapping the child temporarily away from
524 the spaces has the desired effect. Yes, this is "sort
525 of" a hack. */
526
527 pspace = inf->pspace;
528 aspace = inf->aspace;
529 inf->aspace = NULL;
530 inf->pspace = NULL;
531
532 if (debug_infrun || info_verbose)
533 {
534 target_terminal_ours ();
535
536 if (exec)
537 fprintf_filtered (gdb_stdlog,
538 "Detaching vfork parent process %d after child exec.\n",
539 inf->vfork_parent->pid);
540 else
541 fprintf_filtered (gdb_stdlog,
542 "Detaching vfork parent process %d after child exit.\n",
543 inf->vfork_parent->pid);
544 }
545
546 target_detach (NULL, 0);
547
548 /* Put it back. */
549 inf->pspace = pspace;
550 inf->aspace = aspace;
551
552 do_cleanups (old_chain);
553 }
554 else if (exec)
555 {
556 /* We're staying attached to the parent, so really give the
557 child a new address space. */
558 inf->pspace = add_program_space (maybe_new_address_space ());
559 inf->aspace = inf->pspace->aspace;
560 inf->removable = 1;
561 set_current_program_space (inf->pspace);
562
563 resume_parent = inf->vfork_parent->pid;
564
565 /* Break the bonds. */
566 inf->vfork_parent->vfork_child = NULL;
567 }
568 else
569 {
570 struct cleanup *old_chain;
571 struct program_space *pspace;
572
573 /* If this is a vfork child exiting, then the pspace and
574 aspaces were shared with the parent. Since we're
575 reporting the process exit, we'll be mourning all that is
576 found in the address space, and switching to null_ptid,
577 preparing to start a new inferior. But, since we don't
578 want to clobber the parent's address/program spaces, we
579 go ahead and create a new one for this exiting
580 inferior. */
581
582 /* Switch to null_ptid, so that clone_program_space doesn't want
583 to read the selected frame of a dead process. */
584 old_chain = save_inferior_ptid ();
585 inferior_ptid = null_ptid;
586
587 /* This inferior is dead, so avoid giving the breakpoints
588 module the option to write through to it (cloning a
589 program space resets breakpoints). */
590 inf->aspace = NULL;
591 inf->pspace = NULL;
592 pspace = add_program_space (maybe_new_address_space ());
593 set_current_program_space (pspace);
594 inf->removable = 1;
595 clone_program_space (pspace, inf->vfork_parent->pspace);
596 inf->pspace = pspace;
597 inf->aspace = pspace->aspace;
598
599 /* Put back inferior_ptid. We'll continue mourning this
600 inferior. */
601 do_cleanups (old_chain);
602
603 resume_parent = inf->vfork_parent->pid;
604 /* Break the bonds. */
605 inf->vfork_parent->vfork_child = NULL;
606 }
607
608 inf->vfork_parent = NULL;
609
610 gdb_assert (current_program_space == inf->pspace);
611
612 if (non_stop && resume_parent != -1)
613 {
614 /* If the user wanted the parent to be running, let it go
615 free now. */
616 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
617
618 if (debug_infrun)
619 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
620 resume_parent);
621
622 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
623
624 do_cleanups (old_chain);
625 }
626 }
627 }
628
629 /* Enum strings for "set|show follow-exec-mode". */
630
631 static const char follow_exec_mode_new[] = "new";
632 static const char follow_exec_mode_same[] = "same";
633 static const char *follow_exec_mode_names[] =
634 {
635 follow_exec_mode_new,
636 follow_exec_mode_same,
637 NULL,
638 };
639
640 static const char *follow_exec_mode_string = follow_exec_mode_same;
641 static void
642 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
643 struct cmd_list_element *c, const char *value)
644 {
645 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
646 }
647
648 /* EXECD_PATHNAME is assumed to be non-NULL. */
649
650 static void
651 follow_exec (ptid_t pid, char *execd_pathname)
652 {
653 struct thread_info *th = inferior_thread ();
654 struct inferior *inf = current_inferior ();
655
656 /* This is an exec event that we actually wish to pay attention to.
657 Refresh our symbol table to the newly exec'd program, remove any
658 momentary bp's, etc.
659
660 If there are breakpoints, they aren't really inserted now,
661 since the exec() transformed our inferior into a fresh set
662 of instructions.
663
664 We want to preserve symbolic breakpoints on the list, since
665 we have hopes that they can be reset after the new a.out's
666 symbol table is read.
667
668 However, any "raw" breakpoints must be removed from the list
669 (e.g., the solib bp's), since their address is probably invalid
670 now.
671
672 And, we DON'T want to call delete_breakpoints() here, since
673 that may write the bp's "shadow contents" (the instruction
674 value that was overwritten with a TRAP instruction). Since
675 we now have a new a.out, those shadow contents aren't valid. */
676
677 mark_breakpoints_out ();
678
679 update_breakpoints_after_exec ();
680
681 /* If there was one, it's gone now. We cannot truly step-to-next
682 statement through an exec(). */
683 th->step_resume_breakpoint = NULL;
684 th->step_range_start = 0;
685 th->step_range_end = 0;
686
687 /* The target reports the exec event to the main thread, even if
688 some other thread does the exec, and even if the main thread was
689 already stopped --- if debugging in non-stop mode, it's possible
690 the user had the main thread held stopped in the previous image
691 --- release it now. This is the same behavior as step-over-exec
692 with scheduler-locking on in all-stop mode. */
693 th->stop_requested = 0;
694
695 /* What is this a.out's name? */
696 printf_unfiltered (_("%s is executing new program: %s\n"),
697 target_pid_to_str (inferior_ptid),
698 execd_pathname);
699
700 /* We've followed the inferior through an exec. Therefore, the
701 inferior has essentially been killed & reborn. */
702
703 gdb_flush (gdb_stdout);
704
705 breakpoint_init_inferior (inf_execd);
706
707 if (gdb_sysroot && *gdb_sysroot)
708 {
709 char *name = alloca (strlen (gdb_sysroot)
710 + strlen (execd_pathname)
711 + 1);
712
713 strcpy (name, gdb_sysroot);
714 strcat (name, execd_pathname);
715 execd_pathname = name;
716 }
717
718 /* Reset the shared library package. This ensures that we get a
719 shlib event when the child reaches "_start", at which point the
720 dld will have had a chance to initialize the child. */
721 /* Also, loading a symbol file below may trigger symbol lookups, and
722 we don't want those to be satisfied by the libraries of the
723 previous incarnation of this process. */
724 no_shared_libraries (NULL, 0);
725
726 if (follow_exec_mode_string == follow_exec_mode_new)
727 {
728 struct program_space *pspace;
729
730 /* The user wants to keep the old inferior and program spaces
731 around. Create a new fresh one, and switch to it. */
732
733 inf = add_inferior (current_inferior ()->pid);
734 pspace = add_program_space (maybe_new_address_space ());
735 inf->pspace = pspace;
736 inf->aspace = pspace->aspace;
737
738 exit_inferior_num_silent (current_inferior ()->num);
739
740 set_current_inferior (inf);
741 set_current_program_space (pspace);
742 }
743
744 gdb_assert (current_program_space == inf->pspace);
745
746 /* That a.out is now the one to use. */
747 exec_file_attach (execd_pathname, 0);
748
749 /* Load the main file's symbols. */
750 symbol_file_add_main (execd_pathname, 0);
751
752 #ifdef SOLIB_CREATE_INFERIOR_HOOK
753 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
754 #else
755 solib_create_inferior_hook (0);
756 #endif
757
758 jit_inferior_created_hook ();
759
760 /* Reinsert all breakpoints. (Those which were symbolic have
761 been reset to the proper address in the new a.out, thanks
762 to symbol_file_command...) */
763 insert_breakpoints ();
764
765 /* The next resume of this inferior should bring it to the shlib
766 startup breakpoints. (If the user had also set bp's on
767 "main" from the old (parent) process, then they'll auto-
768 matically get reset there in the new process.) */
769 }
770
771 /* Non-zero if we are just simulating a single-step. This is needed
772 because we cannot remove the breakpoints in the inferior process
773 until after the `wait' in `wait_for_inferior'. */
774 static int singlestep_breakpoints_inserted_p = 0;
775
776 /* The thread we inserted single-step breakpoints for. */
777 static ptid_t singlestep_ptid;
778
779 /* PC when we started this single-step. */
780 static CORE_ADDR singlestep_pc;
781
782 /* If another thread hit the singlestep breakpoint, we save the original
783 thread here so that we can resume single-stepping it later. */
784 static ptid_t saved_singlestep_ptid;
785 static int stepping_past_singlestep_breakpoint;
786
787 /* If not equal to null_ptid, this means that after stepping over a
788 breakpoint is finished, we need to switch to deferred_step_ptid, and step it.
789
790 The use case is when one thread has hit a breakpoint, and then the user
791 has switched to another thread and issued 'step'. We need to step over
792 the breakpoint in the thread which hit it, but then continue
793 stepping the thread the user has selected. */
794 static ptid_t deferred_step_ptid;
795 \f
796 /* Displaced stepping. */
797
798 /* In non-stop debugging mode, we must take special care to manage
799 breakpoints properly; in particular, the traditional strategy for
800 stepping a thread past a breakpoint it has hit is unsuitable.
801 'Displaced stepping' is a tactic for stepping one thread past a
802 breakpoint it has hit while ensuring that other threads running
803 concurrently will hit the breakpoint as they should.
804
805 The traditional way to step a thread T off a breakpoint in a
806 multi-threaded program in all-stop mode is as follows:
807
808 a0) Initially, all threads are stopped, and breakpoints are not
809 inserted.
810 a1) We single-step T, leaving breakpoints uninserted.
811 a2) We insert breakpoints, and resume all threads.
812
813 In non-stop debugging, however, this strategy is unsuitable: we
814 don't want to have to stop all threads in the system in order to
815 continue or step T past a breakpoint. Instead, we use displaced
816 stepping:
817
818 n0) Initially, T is stopped, other threads are running, and
819 breakpoints are inserted.
820 n1) We copy the instruction "under" the breakpoint to a separate
821 location, outside the main code stream, making any adjustments
822 to the instruction, register, and memory state as directed by
823 T's architecture.
824 n2) We single-step T over the instruction at its new location.
825 n3) We adjust the resulting register and memory state as directed
826 by T's architecture. This includes resetting T's PC to point
827 back into the main instruction stream.
828 n4) We resume T.
829
830 This approach depends on the following gdbarch methods:
831
832 - gdbarch_max_insn_length and gdbarch_displaced_step_location
833 indicate where to copy the instruction, and how much space must
834 be reserved there. We use these in step n1.
835
836 - gdbarch_displaced_step_copy_insn copies an instruction to a new
837 address, and makes any necessary adjustments to the instruction,
838 register contents, and memory. We use this in step n1.
839
840 - gdbarch_displaced_step_fixup adjusts registers and memory after
841 we have successfully single-stepped the instruction, to yield the
842 same effect the instruction would have had if we had executed it
843 at its original address. We use this in step n3.
844
845 - gdbarch_displaced_step_free_closure provides cleanup.
846
847 The gdbarch_displaced_step_copy_insn and
848 gdbarch_displaced_step_fixup functions must be written so that
849 copying an instruction with gdbarch_displaced_step_copy_insn,
850 single-stepping across the copied instruction, and then applying
851 gdbarch_displaced_step_fixup should have the same effects on the
852 thread's memory and registers as stepping the instruction in place
853 would have. Exactly which responsibilities fall to the copy and
854 which fall to the fixup is up to the author of those functions.
855
856 See the comments in gdbarch.sh for details.
857
858 Note that displaced stepping and software single-step cannot
859 currently be used in combination, although with some care I think
860 they could be made to. Software single-step works by placing
861 breakpoints on all possible subsequent instructions; if the
862 displaced instruction is a PC-relative jump, those breakpoints
863 could fall in very strange places --- on pages that aren't
864 executable, or at addresses that are not proper instruction
865 boundaries. (We do generally let other threads run while we wait
866 to hit the software single-step breakpoint, and they might
867 encounter such a corrupted instruction.) One way to work around
868 this would be to have gdbarch_displaced_step_copy_insn fully
869 simulate the effect of PC-relative instructions (and return NULL)
870 on architectures that use software single-stepping.
871
872 In non-stop mode, we can have independent and simultaneous step
873 requests, so more than one thread may need to simultaneously step
874 over a breakpoint. The current implementation assumes there is
875 only one scratch space per process. In this case, we have to
876 serialize access to the scratch space. If thread A wants to step
877 over a breakpoint, but we are currently waiting for some other
878 thread to complete a displaced step, we leave thread A stopped and
879 place it in the displaced_step_request_queue. Whenever a displaced
880 step finishes, we pick the next thread in the queue and start a new
881 displaced step operation on it. See displaced_step_prepare and
882 displaced_step_fixup for details. */
883
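/* A minimal sketch of the n1-n4 flow described above, illustrative
   only; it ignores the request queue, the save/restore of the scratch
   area, and the error handling that the real displaced_step_prepare
   and displaced_step_fixup below take care of:

     struct regcache *regcache = get_thread_regcache (ptid);
     struct gdbarch *gdbarch = get_regcache_arch (regcache);
     CORE_ADDR original = regcache_read_pc (regcache);
     CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
     struct displaced_step_closure *closure;

     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);

     regcache_write_pc (regcache, copy);
     target_resume (ptid, 1, TARGET_SIGNAL_0);
     ... wait for the single-step to report back ...

     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   regcache);
     gdbarch_displaced_step_free_closure (gdbarch, closure);

   n1 is the copy_insn call, n2 the resume of the out-of-line copy, n3
   the fixup (which also moves the PC back into the main instruction
   stream), and n4 is a normal resume of the thread afterwards.  */
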
884 struct displaced_step_request
885 {
886 ptid_t ptid;
887 struct displaced_step_request *next;
888 };
889
890 /* Per-inferior displaced stepping state. */
891 struct displaced_step_inferior_state
892 {
893 /* Pointer to next in linked list. */
894 struct displaced_step_inferior_state *next;
895
896 /* The process this displaced step state refers to. */
897 int pid;
898
899 /* A queue of pending displaced stepping requests. One entry per
900 thread that needs to do a displaced step. */
901 struct displaced_step_request *step_request_queue;
902
903 /* If this is not null_ptid, this is the thread carrying out a
904 displaced single-step in process PID. This thread's state will
905 require fixing up once it has completed its step. */
906 ptid_t step_ptid;
907
908 /* The architecture the thread had when we stepped it. */
909 struct gdbarch *step_gdbarch;
910
911 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
912 for post-step cleanup. */
913 struct displaced_step_closure *step_closure;
914
915 /* The address of the original instruction, and the copy we
916 made. */
917 CORE_ADDR step_original, step_copy;
918
919 /* Saved contents of copy area. */
920 gdb_byte *step_saved_copy;
921 };
922
923 /* The list of states of processes involved in displaced stepping
924 presently. */
925 static struct displaced_step_inferior_state *displaced_step_inferior_states;
926
927 /* Get the displaced stepping state of process PID. */
928
929 static struct displaced_step_inferior_state *
930 get_displaced_stepping_state (int pid)
931 {
932 struct displaced_step_inferior_state *state;
933
934 for (state = displaced_step_inferior_states;
935 state != NULL;
936 state = state->next)
937 if (state->pid == pid)
938 return state;
939
940 return NULL;
941 }
942
943 /* Add a new displaced stepping state for process PID to the displaced
944 stepping state list, or return a pointer to an already existing
945 entry, if it already exists. Never returns NULL. */
946
947 static struct displaced_step_inferior_state *
948 add_displaced_stepping_state (int pid)
949 {
950 struct displaced_step_inferior_state *state;
951
952 for (state = displaced_step_inferior_states;
953 state != NULL;
954 state = state->next)
955 if (state->pid == pid)
956 return state;
957
958 state = xcalloc (1, sizeof (*state));
959 state->pid = pid;
960 state->next = displaced_step_inferior_states;
961 displaced_step_inferior_states = state;
962
963 return state;
964 }
965
966 /* Remove the displaced stepping state of process PID. */
967
968 static void
969 remove_displaced_stepping_state (int pid)
970 {
971 struct displaced_step_inferior_state *it, **prev_next_p;
972
973 gdb_assert (pid != 0);
974
975 it = displaced_step_inferior_states;
976 prev_next_p = &displaced_step_inferior_states;
977 while (it)
978 {
979 if (it->pid == pid)
980 {
981 *prev_next_p = it->next;
982 xfree (it);
983 return;
984 }
985
986 prev_next_p = &it->next;
987 it = *prev_next_p;
988 }
989 }
990
991 static void
992 infrun_inferior_exit (struct inferior *inf)
993 {
994 remove_displaced_stepping_state (inf->pid);
995 }
996
997 /* Enum strings for "set|show displaced-stepping". */
998
999 static const char can_use_displaced_stepping_auto[] = "auto";
1000 static const char can_use_displaced_stepping_on[] = "on";
1001 static const char can_use_displaced_stepping_off[] = "off";
1002 static const char *can_use_displaced_stepping_enum[] =
1003 {
1004 can_use_displaced_stepping_auto,
1005 can_use_displaced_stepping_on,
1006 can_use_displaced_stepping_off,
1007 NULL,
1008 };
1009
1010 /* If ON, and the architecture supports it, GDB will use displaced
1011 stepping to step over breakpoints. If OFF, or if the architecture
1012 doesn't support it, GDB will instead use the traditional
1013 hold-and-step approach. If AUTO (which is the default), GDB will
1014 decide which technique to use to step over breakpoints depending on
1015 which of all-stop or non-stop mode is active --- displaced stepping
1016 in non-stop mode; hold-and-step in all-stop mode. */
1017
1018 static const char *can_use_displaced_stepping =
1019 can_use_displaced_stepping_auto;
1020
1021 static void
1022 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1023 struct cmd_list_element *c,
1024 const char *value)
1025 {
1026 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1027 fprintf_filtered (file, _("\
1028 Debugger's willingness to use displaced stepping to step over \
1029 breakpoints is %s (currently %s).\n"),
1030 value, non_stop ? "on" : "off");
1031 else
1032 fprintf_filtered (file, _("\
1033 Debugger's willingness to use displaced stepping to step over \
1034 breakpoints is %s.\n"), value);
1035 }
1036
1037 /* Return non-zero if displaced stepping can/should be used to step
1038 over breakpoints. */
1039
1040 static int
1041 use_displaced_stepping (struct gdbarch *gdbarch)
1042 {
1043 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1044 && non_stop)
1045 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1046 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1047 && !RECORD_IS_USED);
1048 }
1049
1050 /* Clean out any stray displaced stepping state. */
1051 static void
1052 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1053 {
1054 /* Indicate that there is no cleanup pending. */
1055 displaced->step_ptid = null_ptid;
1056
1057 if (displaced->step_closure)
1058 {
1059 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1060 displaced->step_closure);
1061 displaced->step_closure = NULL;
1062 }
1063 }
1064
1065 static void
1066 displaced_step_clear_cleanup (void *arg)
1067 {
1068 struct displaced_step_inferior_state *state = arg;
1069
1070 displaced_step_clear (state);
1071 }
1072
1073 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1074 void
1075 displaced_step_dump_bytes (struct ui_file *file,
1076 const gdb_byte *buf,
1077 size_t len)
1078 {
1079 int i;
1080
1081 for (i = 0; i < len; i++)
1082 fprintf_unfiltered (file, "%02x ", buf[i]);
1083 fputs_unfiltered ("\n", file);
1084 }
1085
1086 /* Prepare to single-step, using displaced stepping.
1087
1088 Note that we cannot use displaced stepping when we have a signal to
1089 deliver. If we have a signal to deliver and an instruction to step
1090 over, then after the step, there will be no indication from the
1091 target whether the thread entered a signal handler or ignored the
1092 signal and stepped over the instruction successfully --- both cases
1093 result in a simple SIGTRAP. In the first case we mustn't do a
1094 fixup, and in the second case we must --- but we can't tell which.
1095 Comments in the code for 'random signals' in handle_inferior_event
1096 explain how we handle this case instead.
1097
1098 Returns 1 if preparing was successful -- this thread is going to be
1099 stepped now; or 0 if displaced stepping this thread got queued. */
1100 static int
1101 displaced_step_prepare (ptid_t ptid)
1102 {
1103 struct cleanup *old_cleanups, *ignore_cleanups;
1104 struct regcache *regcache = get_thread_regcache (ptid);
1105 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1106 CORE_ADDR original, copy;
1107 ULONGEST len;
1108 struct displaced_step_closure *closure;
1109 struct displaced_step_inferior_state *displaced;
1110
1111 /* We should never reach this function if the architecture does not
1112 support displaced stepping. */
1113 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1114
1115 /* We have to displaced step one thread at a time, as we only have
1116 access to a single scratch space per inferior. */
1117
1118 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1119
1120 if (!ptid_equal (displaced->step_ptid, null_ptid))
1121 {
1122 /* Already waiting for a displaced step to finish. Defer this
1123 request and place it in the queue. */
1124 struct displaced_step_request *req, *new_req;
1125
1126 if (debug_displaced)
1127 fprintf_unfiltered (gdb_stdlog,
1128 "displaced: defering step of %s\n",
1129 target_pid_to_str (ptid));
1130
1131 new_req = xmalloc (sizeof (*new_req));
1132 new_req->ptid = ptid;
1133 new_req->next = NULL;
1134
1135 if (displaced->step_request_queue)
1136 {
1137 for (req = displaced->step_request_queue;
1138 req && req->next;
1139 req = req->next)
1140 ;
1141 req->next = new_req;
1142 }
1143 else
1144 displaced->step_request_queue = new_req;
1145
1146 return 0;
1147 }
1148 else
1149 {
1150 if (debug_displaced)
1151 fprintf_unfiltered (gdb_stdlog,
1152 "displaced: stepping %s now\n",
1153 target_pid_to_str (ptid));
1154 }
1155
1156 displaced_step_clear (displaced);
1157
1158 old_cleanups = save_inferior_ptid ();
1159 inferior_ptid = ptid;
1160
1161 original = regcache_read_pc (regcache);
1162
1163 copy = gdbarch_displaced_step_location (gdbarch);
1164 len = gdbarch_max_insn_length (gdbarch);
1165
1166 /* Save the original contents of the copy area. */
1167 displaced->step_saved_copy = xmalloc (len);
1168 ignore_cleanups = make_cleanup (free_current_contents,
1169 &displaced->step_saved_copy);
1170 read_memory (copy, displaced->step_saved_copy, len);
1171 if (debug_displaced)
1172 {
1173 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1174 paddress (gdbarch, copy));
1175 displaced_step_dump_bytes (gdb_stdlog,
1176 displaced->step_saved_copy,
1177 len);
1178 }
1179
1180 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1181 original, copy, regcache);
1182
1183 /* We don't support the fully-simulated case at present. */
1184 gdb_assert (closure);
1185
1186 /* Save the information we need to fix things up if the step
1187 succeeds. */
1188 displaced->step_ptid = ptid;
1189 displaced->step_gdbarch = gdbarch;
1190 displaced->step_closure = closure;
1191 displaced->step_original = original;
1192 displaced->step_copy = copy;
1193
1194 make_cleanup (displaced_step_clear_cleanup, displaced);
1195
1196 /* Resume execution at the copy. */
1197 regcache_write_pc (regcache, copy);
1198
1199 discard_cleanups (ignore_cleanups);
1200
1201 do_cleanups (old_cleanups);
1202
1203 if (debug_displaced)
1204 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1205 paddress (gdbarch, copy));
1206
1207 return 1;
1208 }
1209
1210 static void
1211 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1212 {
1213 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1214
1215 inferior_ptid = ptid;
1216 write_memory (memaddr, myaddr, len);
1217 do_cleanups (ptid_cleanup);
1218 }
1219
1220 static void
1221 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1222 {
1223 struct cleanup *old_cleanups;
1224 struct displaced_step_inferior_state *displaced
1225 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1226
1227 /* Was any thread of this process doing a displaced step? */
1228 if (displaced == NULL)
1229 return;
1230
1231 /* Was this event for the pid we displaced? */
1232 if (ptid_equal (displaced->step_ptid, null_ptid)
1233 || ! ptid_equal (displaced->step_ptid, event_ptid))
1234 return;
1235
1236 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1237
1238 /* Restore the contents of the copy area. */
1239 {
1240 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1241
1242 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1243 displaced->step_saved_copy, len);
1244 if (debug_displaced)
1245 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1246 paddress (displaced->step_gdbarch,
1247 displaced->step_copy));
1248 }
1249
1250 /* Did the instruction complete successfully? */
1251 if (signal == TARGET_SIGNAL_TRAP)
1252 {
1253 /* Fix up the resulting state. */
1254 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1255 displaced->step_closure,
1256 displaced->step_original,
1257 displaced->step_copy,
1258 get_thread_regcache (displaced->step_ptid));
1259 }
1260 else
1261 {
1262 /* Since the instruction didn't complete, all we can do is
1263 relocate the PC. */
1264 struct regcache *regcache = get_thread_regcache (event_ptid);
1265 CORE_ADDR pc = regcache_read_pc (regcache);
1266
1267 pc = displaced->step_original + (pc - displaced->step_copy);
1268 regcache_write_pc (regcache, pc);
1269 }
1270
1271 do_cleanups (old_cleanups);
1272
1273 displaced->step_ptid = null_ptid;
1274
1275 /* Are there any pending displaced stepping requests? If so, run
1276 one now. Leave the state object around, since we're likely to
1277 need it again soon. */
1278 while (displaced->step_request_queue)
1279 {
1280 struct displaced_step_request *head;
1281 ptid_t ptid;
1282 struct regcache *regcache;
1283 struct gdbarch *gdbarch;
1284 CORE_ADDR actual_pc;
1285 struct address_space *aspace;
1286
1287 head = displaced->step_request_queue;
1288 ptid = head->ptid;
1289 displaced->step_request_queue = head->next;
1290 xfree (head);
1291
1292 context_switch (ptid);
1293
1294 regcache = get_thread_regcache (ptid);
1295 actual_pc = regcache_read_pc (regcache);
1296 aspace = get_regcache_aspace (regcache);
1297
1298 if (breakpoint_here_p (aspace, actual_pc))
1299 {
1300 if (debug_displaced)
1301 fprintf_unfiltered (gdb_stdlog,
1302 "displaced: stepping queued %s now\n",
1303 target_pid_to_str (ptid));
1304
1305 displaced_step_prepare (ptid);
1306
1307 gdbarch = get_regcache_arch (regcache);
1308
1309 if (debug_displaced)
1310 {
1311 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1312 gdb_byte buf[4];
1313
1314 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1315 paddress (gdbarch, actual_pc));
1316 read_memory (actual_pc, buf, sizeof (buf));
1317 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1318 }
1319
1320 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1321 displaced->step_closure))
1322 target_resume (ptid, 1, TARGET_SIGNAL_0);
1323 else
1324 target_resume (ptid, 0, TARGET_SIGNAL_0);
1325
1326 /* Done, we're stepping a thread. */
1327 break;
1328 }
1329 else
1330 {
1331 int step;
1332 struct thread_info *tp = inferior_thread ();
1333
1334 /* The breakpoint we were sitting under has since been
1335 removed. */
1336 tp->trap_expected = 0;
1337
1338 /* Go back to what we were trying to do. */
1339 step = currently_stepping (tp);
1340
1341 if (debug_displaced)
1342 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1343 target_pid_to_str (tp->ptid), step);
1344
1345 target_resume (ptid, step, TARGET_SIGNAL_0);
1346 tp->stop_signal = TARGET_SIGNAL_0;
1347
1348 /* This request was discarded. See if there's any other
1349 thread waiting for its turn. */
1350 }
1351 }
1352 }
1353
1354 /* Update global variables holding ptids to hold NEW_PTID if they were
1355 holding OLD_PTID. */
1356 static void
1357 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1358 {
1359 struct displaced_step_request *it;
1360 struct displaced_step_inferior_state *displaced;
1361
1362 if (ptid_equal (inferior_ptid, old_ptid))
1363 inferior_ptid = new_ptid;
1364
1365 if (ptid_equal (singlestep_ptid, old_ptid))
1366 singlestep_ptid = new_ptid;
1367
1368 if (ptid_equal (deferred_step_ptid, old_ptid))
1369 deferred_step_ptid = new_ptid;
1370
1371 for (displaced = displaced_step_inferior_states;
1372 displaced;
1373 displaced = displaced->next)
1374 {
1375 if (ptid_equal (displaced->step_ptid, old_ptid))
1376 displaced->step_ptid = new_ptid;
1377
1378 for (it = displaced->step_request_queue; it; it = it->next)
1379 if (ptid_equal (it->ptid, old_ptid))
1380 it->ptid = new_ptid;
1381 }
1382 }
1383
1384 \f
1385 /* Resuming. */
1386
1387 /* Things to clean up if we QUIT out of resume (). */
1388 static void
1389 resume_cleanups (void *ignore)
1390 {
1391 normal_stop ();
1392 }
1393
1394 static const char schedlock_off[] = "off";
1395 static const char schedlock_on[] = "on";
1396 static const char schedlock_step[] = "step";
1397 static const char *scheduler_enums[] = {
1398 schedlock_off,
1399 schedlock_on,
1400 schedlock_step,
1401 NULL
1402 };
1403 static const char *scheduler_mode = schedlock_off;
1404 static void
1405 show_scheduler_mode (struct ui_file *file, int from_tty,
1406 struct cmd_list_element *c, const char *value)
1407 {
1408 fprintf_filtered (file, _("\
1409 Mode for locking scheduler during execution is \"%s\".\n"),
1410 value);
1411 }
1412
1413 static void
1414 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1415 {
1416 if (!target_can_lock_scheduler)
1417 {
1418 scheduler_mode = schedlock_off;
1419 error (_("Target '%s' cannot support this command."), target_shortname);
1420 }
1421 }
1422
1423 /* True if execution commands resume all threads of all processes by
1424 default; otherwise, resume only threads of the current inferior
1425 process. */
1426 int sched_multi = 0;
1427
1428 /* Try to set up software single-stepping over the specified location.
1429 Return 1 if target_resume() should use hardware single step.
1430
1431 GDBARCH the current gdbarch.
1432 PC the location to step over. */
1433
1434 static int
1435 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1436 {
1437 int hw_step = 1;
1438
1439 if (gdbarch_software_single_step_p (gdbarch)
1440 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1441 {
1442 hw_step = 0;
1443 /* Do not pull these breakpoints until after a `wait' in
1444 `wait_for_inferior' */
1445 singlestep_breakpoints_inserted_p = 1;
1446 singlestep_ptid = inferior_ptid;
1447 singlestep_pc = pc;
1448 }
1449 return hw_step;
1450 }
1451
1452 /* Resume the inferior, but allow a QUIT. This is useful if the user
1453 wants to interrupt some lengthy single-stepping operation
1454 (for child processes, the SIGINT goes to the inferior, and so
1455 we get a SIGINT random_signal, but for remote debugging and perhaps
1456 other targets, that's not true).
1457
1458 STEP nonzero if we should step (zero to continue instead).
1459 SIG is the signal to give the inferior (zero for none). */
1460 void
1461 resume (int step, enum target_signal sig)
1462 {
1463 int should_resume = 1;
1464 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1465 struct regcache *regcache = get_current_regcache ();
1466 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1467 struct thread_info *tp = inferior_thread ();
1468 CORE_ADDR pc = regcache_read_pc (regcache);
1469 struct address_space *aspace = get_regcache_aspace (regcache);
1470
1471 QUIT;
1472
1473 if (debug_infrun)
1474 fprintf_unfiltered (gdb_stdlog,
1475 "infrun: resume (step=%d, signal=%d), "
1476 "trap_expected=%d\n",
1477 step, sig, tp->trap_expected);
1478
1479 /* Normally, by the time we reach `resume', the breakpoints are either
1480 removed or inserted, as appropriate. The exception is if we're sitting
1481 at a permanent breakpoint; we need to step over it, but permanent
1482 breakpoints can't be removed. So we have to test for it here. */
1483 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1484 {
1485 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1486 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1487 else
1488 error (_("\
1489 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1490 how to step past a permanent breakpoint on this architecture. Try using\n\
1491 a command like `return' or `jump' to continue execution."));
1492 }
1493
1494 /* If enabled, step over breakpoints by executing a copy of the
1495 instruction at a different address.
1496
1497 We can't use displaced stepping when we have a signal to deliver;
1498 the comments for displaced_step_prepare explain why. The
1499 comments in handle_inferior_event for dealing with 'random
1500 signals' explain what we do instead. */
1501 if (use_displaced_stepping (gdbarch)
1502 && (tp->trap_expected
1503 || (step && gdbarch_software_single_step_p (gdbarch)))
1504 && sig == TARGET_SIGNAL_0)
1505 {
1506 struct displaced_step_inferior_state *displaced;
1507
1508 if (!displaced_step_prepare (inferior_ptid))
1509 {
1510 /* Got placed in displaced stepping queue. Will be resumed
1511 later when all the currently queued displaced stepping
1512 requests finish. The thread is not executing at this point,
1513 and the call to set_executing will be made later. But we
1514 need to call set_running here, since from the frontend's point of view,
1515 the thread is running. */
1516 set_running (inferior_ptid, 1);
1517 discard_cleanups (old_cleanups);
1518 return;
1519 }
1520
1521 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1522 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1523 displaced->step_closure);
1524 }
1525
1526 /* Do we need to do it the hard way, w/temp breakpoints? */
1527 else if (step)
1528 step = maybe_software_singlestep (gdbarch, pc);
1529
1530 if (should_resume)
1531 {
1532 ptid_t resume_ptid;
1533
1534 /* If STEP is set, it's a request to use hardware stepping
1535 facilities. But in that case, we should never
1536 use a singlestep breakpoint. */
1537 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1538
1539 /* Decide the set of threads to ask the target to resume. Start
1540 by assuming everything will be resumed, then narrow the set
1541 by applying increasingly restrictive conditions. */
1542
1543 /* By default, resume all threads of all processes. */
1544 resume_ptid = RESUME_ALL;
1545
1546 /* Maybe resume only all threads of the current process. */
1547 if (!sched_multi && target_supports_multi_process ())
1548 {
1549 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1550 }
1551
1552 /* Maybe resume a single thread after all. */
1553 if (singlestep_breakpoints_inserted_p
1554 && stepping_past_singlestep_breakpoint)
1555 {
1556 /* The situation here is as follows. In thread T1 we wanted to
1557 single-step. Lacking hardware single-stepping we've
1558 set a breakpoint at the PC of the next instruction -- call it
1559 P. After resuming, we've hit that breakpoint in thread T2.
1560 Now we've removed the original breakpoint, inserted a breakpoint
1561 at P+1, and try to step to advance T2 past the breakpoint.
1562 We need to step only T2, as if T1 is allowed to run freely,
1563 it can run past P, and if other threads are allowed to run,
1564 they can hit the breakpoint at P+1, and nested hits of single-step
1565 breakpoints are not something we'd want -- that's complicated
1566 to support, and has no value. */
1567 resume_ptid = inferior_ptid;
1568 }
1569 else if ((step || singlestep_breakpoints_inserted_p)
1570 && tp->trap_expected)
1571 {
1572 /* We're allowing a thread to run past a breakpoint it has
1573 hit, by single-stepping the thread with the breakpoint
1574 removed. In which case, we need to single-step only this
1575 thread, and keep others stopped, as they can miss this
1576 breakpoint if allowed to run.
1577
1578 The current code actually removes all breakpoints when
1579 doing this, not just the one being stepped over, so if we
1580 let other threads run, we can actually miss any
1581 breakpoint, not just the one at PC. */
1582 resume_ptid = inferior_ptid;
1583 }
1584 else if (non_stop)
1585 {
1586 /* With non-stop mode on, threads are always handled
1587 individually. */
1588 resume_ptid = inferior_ptid;
1589 }
1590 else if ((scheduler_mode == schedlock_on)
1591 || (scheduler_mode == schedlock_step
1592 && (step || singlestep_breakpoints_inserted_p)))
1593 {
1594 /* User-settable 'scheduler' mode requires solo thread resume. */
1595 resume_ptid = inferior_ptid;
1596 }
1597
1598 if (gdbarch_cannot_step_breakpoint (gdbarch))
1599 {
1600 /* Most targets can step a breakpoint instruction, thus
1601 executing it normally. But if this one cannot, just
1602 continue and we will hit it anyway. */
1603 if (step && breakpoint_inserted_here_p (aspace, pc))
1604 step = 0;
1605 }
1606
1607 if (debug_displaced
1608 && use_displaced_stepping (gdbarch)
1609 && tp->trap_expected)
1610 {
1611 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1612 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1613 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1614 gdb_byte buf[4];
1615
1616 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1617 paddress (resume_gdbarch, actual_pc));
1618 read_memory (actual_pc, buf, sizeof (buf));
1619 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1620 }
1621
1622 /* Install inferior's terminal modes. */
1623 target_terminal_inferior ();
1624
1625 /* Avoid confusing the next resume, if the next stop/resume
1626 happens to apply to another thread. */
1627 tp->stop_signal = TARGET_SIGNAL_0;
1628
1629 target_resume (resume_ptid, step, sig);
1630 }
1631
1632 discard_cleanups (old_cleanups);
1633 }
1634 \f
1635 /* Proceeding. */
1636
1637 /* Clear out all variables saying what to do when the inferior is continued.
1638 First do this, then set the ones you want, then call `proceed'. */
1639
1640 static void
1641 clear_proceed_status_thread (struct thread_info *tp)
1642 {
1643 if (debug_infrun)
1644 fprintf_unfiltered (gdb_stdlog,
1645 "infrun: clear_proceed_status_thread (%s)\n",
1646 target_pid_to_str (tp->ptid));
1647
1648 tp->trap_expected = 0;
1649 tp->step_range_start = 0;
1650 tp->step_range_end = 0;
1651 tp->step_frame_id = null_frame_id;
1652 tp->step_stack_frame_id = null_frame_id;
1653 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1654 tp->stop_requested = 0;
1655
1656 tp->stop_step = 0;
1657
1658 tp->proceed_to_finish = 0;
1659
1660 /* Discard any remaining commands or status from previous stop. */
1661 bpstat_clear (&tp->stop_bpstat);
1662 }
1663
1664 static int
1665 clear_proceed_status_callback (struct thread_info *tp, void *data)
1666 {
1667 if (is_exited (tp->ptid))
1668 return 0;
1669
1670 clear_proceed_status_thread (tp);
1671 return 0;
1672 }
1673
1674 void
1675 clear_proceed_status (void)
1676 {
1677 if (!non_stop)
1678 {
1679 /* In all-stop mode, delete the per-thread status of all
1680 threads; even if inferior_ptid is null_ptid, there may be
1681 threads on the list. E.g., we may be launching a new
1682 process while selecting the executable. */
1683 iterate_over_threads (clear_proceed_status_callback, NULL);
1684 }
1685
1686 if (!ptid_equal (inferior_ptid, null_ptid))
1687 {
1688 struct inferior *inferior;
1689
1690 if (non_stop)
1691 {
1692 /* If in non-stop mode, only delete the per-thread status of
1693 the current thread. */
1694 clear_proceed_status_thread (inferior_thread ());
1695 }
1696
1697 inferior = current_inferior ();
1698 inferior->stop_soon = NO_STOP_QUIETLY;
1699 }
1700
1701 stop_after_trap = 0;
1702
1703 observer_notify_about_to_proceed ();
1704
1705 if (stop_registers)
1706 {
1707 regcache_xfree (stop_registers);
1708 stop_registers = NULL;
1709 }
1710 }
1711
1712 /* Check the current thread against the thread that reported the most recent
1713 event. If a step-over is required, return TRUE and set the current
1714 thread to the old thread. Otherwise, return FALSE.
1715
1716 This should be suitable for any targets that support threads. */
1717
1718 static int
1719 prepare_to_proceed (int step)
1720 {
1721 ptid_t wait_ptid;
1722 struct target_waitstatus wait_status;
1723 int schedlock_enabled;
1724
1725 /* With non-stop mode on, threads are always handled individually. */
1726 gdb_assert (! non_stop);
1727
1728 /* Get the last target status returned by target_wait(). */
1729 get_last_target_status (&wait_ptid, &wait_status);
1730
1731 /* Make sure we were stopped at a breakpoint. */
1732 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1733 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1734 && wait_status.value.sig != TARGET_SIGNAL_ILL
1735 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1736 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1737 {
1738 return 0;
1739 }
1740
1741 schedlock_enabled = (scheduler_mode == schedlock_on
1742 || (scheduler_mode == schedlock_step
1743 && step));
1744
1745 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1746 if (schedlock_enabled)
1747 return 0;
1748
1749 /* Don't switch over if we're about to resume some process other
1750 than WAIT_PTID's, and schedule-multiple is off. */
1751 if (!sched_multi
1752 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1753 return 0;
1754
1755 /* Switched over from WAIT_PTID. */
1756 if (!ptid_equal (wait_ptid, minus_one_ptid)
1757 && !ptid_equal (inferior_ptid, wait_ptid))
1758 {
1759 struct regcache *regcache = get_thread_regcache (wait_ptid);
1760
1761 if (breakpoint_here_p (get_regcache_aspace (regcache),
1762 regcache_read_pc (regcache)))
1763 {
1764 /* If stepping, remember current thread to switch back to. */
1765 if (step)
1766 deferred_step_ptid = inferior_ptid;
1767
1768 /* Switch back to the WAIT_PTID thread. */
1769 switch_to_thread (wait_ptid);
1770
1771 /* We return 1 to indicate that there is a breakpoint here,
1772 so we need to step over it before continuing to avoid
1773 hitting it straight away. */
1774 return 1;
1775 }
1776 }
1777
1778 return 0;
1779 }
1780
1781 /* Basic routine for continuing the program in various fashions.
1782
1783 ADDR is the address to resume at, or -1 for resume where stopped.
1784 SIGGNAL is the signal to give it, or 0 for none,
1785 or -1 to act according to how it stopped.
1786 STEP is nonzero if we should trap after one instruction.
1787 -1 means return after that and print nothing.
1788 You should probably set various step_... variables
1789 before calling here, if you are stepping.
1790
1791 You should call clear_proceed_status before calling proceed. */
1792
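/* Editor's note -- an illustrative sketch, not part of the original
   source: a typical "continue"-style caller follows the protocol
   described above roughly like this:

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   i.e. resume where the inferior stopped, let the stop signal be
   chosen from how it last stopped (TARGET_SIGNAL_DEFAULT), and do not
   single-step.  A stepping caller would additionally fill in the
   current thread's step_range_start/step_range_end fields before the
   call, and pass STEP as 1.  */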
1793 void
1794 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1795 {
1796 struct regcache *regcache;
1797 struct gdbarch *gdbarch;
1798 struct thread_info *tp;
1799 CORE_ADDR pc;
1800 struct address_space *aspace;
1801 int oneproc = 0;
1802
1803 /* If we're stopped at a fork/vfork, follow the branch set by the
1804 "set follow-fork-mode" command; otherwise, we'll just proceed
1805 resuming the current thread. */
1806 if (!follow_fork ())
1807 {
1808 /* The target for some reason decided not to resume. */
1809 normal_stop ();
1810 return;
1811 }
1812
1813 regcache = get_current_regcache ();
1814 gdbarch = get_regcache_arch (regcache);
1815 aspace = get_regcache_aspace (regcache);
1816 pc = regcache_read_pc (regcache);
1817
1818 if (step > 0)
1819 step_start_function = find_pc_function (pc);
1820 if (step < 0)
1821 stop_after_trap = 1;
1822
1823 if (addr == (CORE_ADDR) -1)
1824 {
1825 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1826 && execution_direction != EXEC_REVERSE)
1827 /* There is a breakpoint at the address we will resume at;
1828 step one instruction before inserting breakpoints so that
1829 we do not stop right away (and report a second hit at this
1830 breakpoint).
1831
1832 Note, we don't do this in reverse, because we won't
1833 actually be executing the breakpoint insn anyway.
1834 We'll be (un-)executing the previous instruction. */
1835
1836 oneproc = 1;
1837 else if (gdbarch_single_step_through_delay_p (gdbarch)
1838 && gdbarch_single_step_through_delay (gdbarch,
1839 get_current_frame ()))
1840 /* We stepped onto an instruction that needs to be stepped
1841 again before re-inserting the breakpoint; do so. */
1842 oneproc = 1;
1843 }
1844 else
1845 {
1846 regcache_write_pc (regcache, addr);
1847 }
1848
1849 if (debug_infrun)
1850 fprintf_unfiltered (gdb_stdlog,
1851 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1852 paddress (gdbarch, addr), siggnal, step);
1853
1854 /* We're handling a live event, so make sure we're doing live
1855 debugging. If we're looking at traceframes while the target is
1856 running, we're going to need to get back to that mode after
1857 handling the event. */
1858 if (non_stop)
1859 {
1860 make_cleanup_restore_current_traceframe ();
1861 set_traceframe_number (-1);
1862 }
1863
1864 if (non_stop)
1865 /* In non-stop, each thread is handled individually. The context
1866 must already be set to the right thread here. */
1867 ;
1868 else
1869 {
1870 /* In a multi-threaded task we may select another thread and
1871 then continue or step.
1872
1873 But if the old thread was stopped at a breakpoint, it will
1874 immediately cause another breakpoint stop without any
1875 execution (i.e. it will report a breakpoint hit incorrectly).
1876 So we must step over it first.
1877
1878 prepare_to_proceed checks the current thread against the
1879 thread that reported the most recent event. If a step-over
1880 is required it returns TRUE and sets the current thread to
1881 the old thread. */
1882 if (prepare_to_proceed (step))
1883 oneproc = 1;
1884 }
1885
1886 /* prepare_to_proceed may change the current thread. */
1887 tp = inferior_thread ();
1888
1889 if (oneproc)
1890 {
1891 tp->trap_expected = 1;
1892 /* If displaced stepping is enabled, we can step over the
1893 breakpoint without hitting it, so leave all breakpoints
1894 inserted. Otherwise we need to disable all breakpoints, step
1895 one instruction, and then re-add them when that step is
1896 finished. */
1897 if (!use_displaced_stepping (gdbarch))
1898 remove_breakpoints ();
1899 }
1900
1901 /* We can insert breakpoints if we're not trying to step over one,
1902 or if we are stepping over one but we're using displaced stepping
1903 to do so. */
1904 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1905 insert_breakpoints ();
1906
1907 if (!non_stop)
1908 {
1909 /* Pass the last stop signal to the thread we're resuming,
1910 irrespective of whether the current thread is the thread that
1911 got the last event or not. This was historically GDB's
1912 behaviour before keeping a stop_signal per thread. */
1913
1914 struct thread_info *last_thread;
1915 ptid_t last_ptid;
1916 struct target_waitstatus last_status;
1917
1918 get_last_target_status (&last_ptid, &last_status);
1919 if (!ptid_equal (inferior_ptid, last_ptid)
1920 && !ptid_equal (last_ptid, null_ptid)
1921 && !ptid_equal (last_ptid, minus_one_ptid))
1922 {
1923 last_thread = find_thread_ptid (last_ptid);
1924 if (last_thread)
1925 {
1926 tp->stop_signal = last_thread->stop_signal;
1927 last_thread->stop_signal = TARGET_SIGNAL_0;
1928 }
1929 }
1930 }
1931
1932 if (siggnal != TARGET_SIGNAL_DEFAULT)
1933 tp->stop_signal = siggnal;
1934 /* If this signal should not be seen by the program,
1935 give it zero. Used for debugging signals. */
1936 else if (!signal_program[tp->stop_signal])
1937 tp->stop_signal = TARGET_SIGNAL_0;
1938
1939 annotate_starting ();
1940
1941 /* Make sure that output from GDB appears before output from the
1942 inferior. */
1943 gdb_flush (gdb_stdout);
1944
1945 /* Refresh prev_pc value just prior to resuming. This used to be
1946 done in stop_stepping; however, setting prev_pc there did not handle
1947 scenarios such as inferior function calls or returning from
1948 a function via the return command. In those cases, the prev_pc
1949 value was not set properly for subsequent commands. The prev_pc value
1950 is used to initialize the starting line number in the ecs. With an
1951 invalid value, the gdb next command ends up stopping at the position
1952 represented by the next line table entry past our start position.
1953 On platforms that generate one line table entry per line, this
1954 is not a problem. However, on the ia64, the compiler generates
1955 extraneous line table entries that do not increase the line number.
1956 When we issue the gdb next command on the ia64 after an inferior call
1957 or a return command, we often end up a few instructions forward, still
1958 within the original line we started in.
1959
1960 An attempt was made to refresh the prev_pc at the same time the
1961 execution_control_state is initialized (for instance, just before
1962 waiting for an inferior event). But this approach did not work
1963 because of platforms that use ptrace, where the pc register cannot
1964 be read unless the inferior is stopped. At that point, we are not
1965 guaranteed the inferior is stopped and so the regcache_read_pc() call
1966 can fail. Setting the prev_pc value here ensures the value is updated
1967 correctly when the inferior is stopped. */
1968 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1969
1970 /* Fill in with reasonable starting values. */
1971 init_thread_stepping_state (tp);
1972
1973 /* Reset to normal state. */
1974 init_infwait_state ();
1975
1976 /* Resume inferior. */
1977 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1978
1979 /* Wait for it to stop (if not standalone)
1980 and in any case decode why it stopped, and act accordingly. */
1981 /* Do this only if we are not using the event loop, or if the target
1982 does not support asynchronous execution. */
1983 if (!target_can_async_p ())
1984 {
1985 wait_for_inferior (0);
1986 normal_stop ();
1987 }
1988 }
1989 \f
1990
1991 /* Start remote-debugging of a machine over a serial link. */
1992
1993 void
1994 start_remote (int from_tty)
1995 {
1996 struct inferior *inferior;
1997
1998 init_wait_for_inferior ();
1999 inferior = current_inferior ();
2000 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2001
2002 /* Always go on waiting for the target, regardless of the mode. */
2003 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2004 indicate to wait_for_inferior that a target should timeout if
2005 nothing is returned (instead of just blocking). Because of this,
2006 targets expecting an immediate response need to, internally, set
2007 things up so that the target_wait() is forced to eventually
2008 timeout. */
2009 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2010 differentiate to its caller what the state of the target is after
2011 the initial open has been performed. Here we're assuming that
2012 the target has stopped. It should be possible to eventually have
2013 target_open() return to the caller an indication that the target
2014 is currently running and GDB state should be set to the same as
2015 for an async run. */
2016 wait_for_inferior (0);
2017
2018 /* Now that the inferior has stopped, do any bookkeeping like
2019 loading shared libraries. We want to do this before normal_stop,
2020 so that the displayed frame is up to date. */
2021 post_create_inferior (&current_target, from_tty);
2022
2023 normal_stop ();
2024 }
2025
2026 /* Initialize static vars when a new inferior begins. */
2027
2028 void
2029 init_wait_for_inferior (void)
2030 {
2031 /* These are meaningless until the first time through wait_for_inferior. */
2032
2033 breakpoint_init_inferior (inf_starting);
2034
2035 clear_proceed_status ();
2036
2037 stepping_past_singlestep_breakpoint = 0;
2038 deferred_step_ptid = null_ptid;
2039
2040 target_last_wait_ptid = minus_one_ptid;
2041
2042 previous_inferior_ptid = null_ptid;
2043 init_infwait_state ();
2044
2045 /* Discard any skipped inlined frames. */
2046 clear_inline_frame_state (minus_one_ptid);
2047 }
2048
2049 \f
2050 /* This enum encodes possible reasons for doing a target_wait, so that
2051 wfi can call target_wait in one place. (Ultimately the call will be
2052 moved out of the infinite loop entirely.) */
2053
2054 enum infwait_states
2055 {
2056 infwait_normal_state,
2057 infwait_thread_hop_state,
2058 infwait_step_watch_state,
2059 infwait_nonstep_watch_state
2060 };
2061
2062 /* Why did the inferior stop? Used to print the appropriate messages
2063 to the interface from within handle_inferior_event(). */
2064 enum inferior_stop_reason
2065 {
2066 /* Step, next, nexti, stepi finished. */
2067 END_STEPPING_RANGE,
2068 /* Inferior terminated by signal. */
2069 SIGNAL_EXITED,
2070 /* Inferior exited. */
2071 EXITED,
2072 /* Inferior received signal, and user asked to be notified. */
2073 SIGNAL_RECEIVED,
2074 /* Reverse execution -- target ran out of history info. */
2075 NO_HISTORY
2076 };
2077
2078 /* The PTID we'll do a target_wait on. */
2079 ptid_t waiton_ptid;
2080
2081 /* Current inferior wait state. */
2082 enum infwait_states infwait_state;
2083
2084 /* Data to be passed around while handling an event. This data is
2085 discarded between events. */
2086 struct execution_control_state
2087 {
2088 ptid_t ptid;
2089 /* The thread that got the event, if this was a thread event; NULL
2090 otherwise. */
2091 struct thread_info *event_thread;
2092
2093 struct target_waitstatus ws;
2094 int random_signal;
2095 CORE_ADDR stop_func_start;
2096 CORE_ADDR stop_func_end;
2097 char *stop_func_name;
2098 int new_thread_event;
2099 int wait_some_more;
2100 };
2101
2102 static void handle_inferior_event (struct execution_control_state *ecs);
2103
2104 static void handle_step_into_function (struct gdbarch *gdbarch,
2105 struct execution_control_state *ecs);
2106 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2107 struct execution_control_state *ecs);
2108 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2109 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2110 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2111 struct symtab_and_line sr_sal,
2112 struct frame_id sr_id);
2113 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2114
2115 static void stop_stepping (struct execution_control_state *ecs);
2116 static void prepare_to_wait (struct execution_control_state *ecs);
2117 static void keep_going (struct execution_control_state *ecs);
2118 static void print_stop_reason (enum inferior_stop_reason stop_reason,
2119 int stop_info);
2120
2121 /* Callback for iterate over threads. If the thread is stopped, but
2122 the user/frontend doesn't know about that yet, go through
2123 normal_stop, as if the thread had just stopped now. ARG points at
2124 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2125 ptid_is_pid(PTID) is true, applies to all threads of the process
2126 pointed at by PTID. Otherwise, applies only to the thread pointed
2127 at by PTID. */
2128
2129 static int
2130 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2131 {
2132 ptid_t ptid = * (ptid_t *) arg;
2133
2134 if ((ptid_equal (info->ptid, ptid)
2135 || ptid_equal (minus_one_ptid, ptid)
2136 || (ptid_is_pid (ptid)
2137 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2138 && is_running (info->ptid)
2139 && !is_executing (info->ptid))
2140 {
2141 struct cleanup *old_chain;
2142 struct execution_control_state ecss;
2143 struct execution_control_state *ecs = &ecss;
2144
2145 memset (ecs, 0, sizeof (*ecs));
2146
2147 old_chain = make_cleanup_restore_current_thread ();
2148
2149 switch_to_thread (info->ptid);
2150
2151 /* Go through handle_inferior_event/normal_stop, so we always
2152 have consistent output as if the stop event had been
2153 reported. */
2154 ecs->ptid = info->ptid;
2155 ecs->event_thread = find_thread_ptid (info->ptid);
2156 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2157 ecs->ws.value.sig = TARGET_SIGNAL_0;
2158
2159 handle_inferior_event (ecs);
2160
2161 if (!ecs->wait_some_more)
2162 {
2163 struct thread_info *tp;
2164
2165 normal_stop ();
2166
2167 /* Finish off the continuations. The continuations
2168 themselves are responsible for realising the thread
2169 didn't finish what it was supposed to do. */
2170 tp = inferior_thread ();
2171 do_all_intermediate_continuations_thread (tp);
2172 do_all_continuations_thread (tp);
2173 }
2174
2175 do_cleanups (old_chain);
2176 }
2177
2178 return 0;
2179 }
2180
2181 /* This function is attached as a "thread_stop_requested" observer.
2182 Cleanup local state that assumed the PTID was to be resumed, and
2183 report the stop to the frontend. */
2184
2185 static void
2186 infrun_thread_stop_requested (ptid_t ptid)
2187 {
2188 struct displaced_step_inferior_state *displaced;
2189
2190 /* PTID was requested to stop. Remove it from the displaced
2191 stepping queue, so we don't try to resume it automatically. */
2192
2193 for (displaced = displaced_step_inferior_states;
2194 displaced;
2195 displaced = displaced->next)
2196 {
2197 struct displaced_step_request *it, **prev_next_p;
2198
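/* Walk the queue keeping PREV_NEXT_P pointing at the previous link's
   `next' field (initially the list head), so that entries matching
   PTID can be unlinked and freed in place without losing our position
   in the list.  */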
2199 it = displaced->step_request_queue;
2200 prev_next_p = &displaced->step_request_queue;
2201 while (it)
2202 {
2203 if (ptid_match (it->ptid, ptid))
2204 {
2205 *prev_next_p = it->next;
2206 it->next = NULL;
2207 xfree (it);
2208 }
2209 else
2210 {
2211 prev_next_p = &it->next;
2212 }
2213
2214 it = *prev_next_p;
2215 }
2216 }
2217
2218 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2219 }
2220
2221 static void
2222 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2223 {
2224 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2225 nullify_last_target_wait_ptid ();
2226 }
2227
2228 /* Callback for iterate_over_threads. */
2229
2230 static int
2231 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2232 {
2233 if (is_exited (info->ptid))
2234 return 0;
2235
2236 delete_step_resume_breakpoint (info);
2237 return 0;
2238 }
2239
2240 /* In all-stop, delete the step resume breakpoint of any thread that
2241 had one. In non-stop, delete the step resume breakpoint of the
2242 thread that just stopped. */
2243
2244 static void
2245 delete_step_thread_step_resume_breakpoint (void)
2246 {
2247 if (!target_has_execution
2248 || ptid_equal (inferior_ptid, null_ptid))
2249 /* If the inferior has exited, we have already deleted the step
2250 resume breakpoints out of GDB's lists. */
2251 return;
2252
2253 if (non_stop)
2254 {
2255 /* If in non-stop mode, only delete the step-resume or
2256 longjmp-resume breakpoint of the thread that just stopped
2257 stepping. */
2258 struct thread_info *tp = inferior_thread ();
2259
2260 delete_step_resume_breakpoint (tp);
2261 }
2262 else
2263 /* In all-stop mode, delete all step-resume and longjmp-resume
2264 breakpoints of any thread that had them. */
2265 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2266 }
2267
2268 /* A cleanup wrapper. */
2269
2270 static void
2271 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2272 {
2273 delete_step_thread_step_resume_breakpoint ();
2274 }
2275
2276 /* Pretty print the results of target_wait, for debugging purposes. */
2277
2278 static void
2279 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2280 const struct target_waitstatus *ws)
2281 {
2282 char *status_string = target_waitstatus_to_string (ws);
2283 struct ui_file *tmp_stream = mem_fileopen ();
2284 char *text;
2285
2286 /* The text is split over several lines because it was getting too long.
2287 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2288 output as a unit; we want only one timestamp printed if debug_timestamp
2289 is set. */
2290
2291 fprintf_unfiltered (tmp_stream,
2292 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2293 if (PIDGET (waiton_ptid) != -1)
2294 fprintf_unfiltered (tmp_stream,
2295 " [%s]", target_pid_to_str (waiton_ptid));
2296 fprintf_unfiltered (tmp_stream, ", status) =\n");
2297 fprintf_unfiltered (tmp_stream,
2298 "infrun: %d [%s],\n",
2299 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2300 fprintf_unfiltered (tmp_stream,
2301 "infrun: %s\n",
2302 status_string);
2303
2304 text = ui_file_xstrdup (tmp_stream, NULL);
2305
2306 /* This uses %s in part to handle %'s in the text, but also to avoid
2307 a gcc error: the format attribute requires a string literal. */
2308 fprintf_unfiltered (gdb_stdlog, "%s", text);
2309
2310 xfree (status_string);
2311 xfree (text);
2312 ui_file_delete (tmp_stream);
2313 }
2314
2315 /* Prepare and stabilize the inferior for detaching it. E.g.,
2316 detaching while a thread is displaced stepping is a recipe for
2317 crashing it, as nothing would readjust the PC out of the scratch
2318 pad. */
2319
2320 void
2321 prepare_for_detach (void)
2322 {
2323 struct inferior *inf = current_inferior ();
2324 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2325 struct cleanup *old_chain_1;
2326 struct displaced_step_inferior_state *displaced;
2327
2328 displaced = get_displaced_stepping_state (inf->pid);
2329
2330 /* Is any thread of this process displaced stepping? If not,
2331 there's nothing else to do. */
2332 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2333 return;
2334
2335 if (debug_infrun)
2336 fprintf_unfiltered (gdb_stdlog,
2337 "displaced-stepping in-process while detaching");
2338
2339 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2340 inf->detaching = 1;
2341
2342 while (!ptid_equal (displaced->step_ptid, null_ptid))
2343 {
2344 struct cleanup *old_chain_2;
2345 struct execution_control_state ecss;
2346 struct execution_control_state *ecs;
2347
2348 ecs = &ecss;
2349 memset (ecs, 0, sizeof (*ecs));
2350
2351 overlay_cache_invalid = 1;
2352
2353 /* We have to invalidate the registers BEFORE calling
2354 target_wait because they can be loaded from the target while
2355 in target_wait. This makes remote debugging a bit more
2356 efficient for those targets that provide critical registers
2357 as part of their normal status mechanism. */
2358
2359 registers_changed ();
2360
2361 if (deprecated_target_wait_hook)
2362 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2363 else
2364 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2365
2366 if (debug_infrun)
2367 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2368
2369 /* If an error happens while handling the event, propagate GDB's
2370 knowledge of the executing state to the frontend/user running
2371 state. */
2372 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2373
2374 /* In non-stop mode, each thread is handled individually.
2375 Switch early, so the global state is set correctly for this
2376 thread. */
2377 if (non_stop
2378 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2379 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2380 context_switch (ecs->ptid);
2381
2382 /* Now figure out what to do with the result. */
2383 handle_inferior_event (ecs);
2384
2385 /* No error, don't finish the state yet. */
2386 discard_cleanups (old_chain_2);
2387
2388 /* Breakpoints and watchpoints are not installed on the target
2389 at this point, and signals are passed directly to the
2390 inferior, so this must mean the process is gone. */
2391 if (!ecs->wait_some_more)
2392 {
2393 discard_cleanups (old_chain_1);
2394 error (_("Program exited while detaching"));
2395 }
2396 }
2397
2398 discard_cleanups (old_chain_1);
2399 }
2400
2401 /* Wait for control to return from inferior to debugger.
2402
2403 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC events
2404 as if they were SIGTRAP signals. This can be useful during
2405 the startup sequence on some targets such as HP/UX, where
2406 we receive an EXEC event instead of the expected SIGTRAP.
2407
2408 If the inferior gets a signal, we may decide to start it up again
2409 instead of returning. That is why there is a loop in this function.
2410 When this function actually returns it means the inferior
2411 should be left stopped and GDB should read more commands. */
2412
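/* Editor's note -- illustrative only, not part of the original source:
   the typical synchronous caller (as in proceed and start_remote above)
   is simply

     wait_for_inferior (0);
     normal_stop ();

   i.e. block until the next event worth reporting, then present the
   stop to the user.  */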
2413 void
2414 wait_for_inferior (int treat_exec_as_sigtrap)
2415 {
2416 struct cleanup *old_cleanups;
2417 struct execution_control_state ecss;
2418 struct execution_control_state *ecs;
2419
2420 if (debug_infrun)
2421 fprintf_unfiltered
2422 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2423 treat_exec_as_sigtrap);
2424
2425 old_cleanups =
2426 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2427
2428 ecs = &ecss;
2429 memset (ecs, 0, sizeof (*ecs));
2430
2431 /* We'll update this if & when we switch to a new thread. */
2432 previous_inferior_ptid = inferior_ptid;
2433
2434 while (1)
2435 {
2436 struct cleanup *old_chain;
2437
2438 /* We have to invalidate the registers BEFORE calling target_wait
2439 because they can be loaded from the target while in target_wait.
2440 This makes remote debugging a bit more efficient for those
2441 targets that provide critical registers as part of their normal
2442 status mechanism. */
2443
2444 overlay_cache_invalid = 1;
2445 registers_changed ();
2446
2447 if (deprecated_target_wait_hook)
2448 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2449 else
2450 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2451
2452 if (debug_infrun)
2453 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2454
2455 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2456 {
2457 xfree (ecs->ws.value.execd_pathname);
2458 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2459 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2460 }
2461
2462 /* If an error happens while handling the event, propagate GDB's
2463 knowledge of the executing state to the frontend/user running
2464 state. */
2465 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2466
2467 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2468 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2469 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2470
2471 /* Now figure out what to do with the result. */
2472 handle_inferior_event (ecs);
2473
2474 /* No error, don't finish the state yet. */
2475 discard_cleanups (old_chain);
2476
2477 if (!ecs->wait_some_more)
2478 break;
2479 }
2480
2481 do_cleanups (old_cleanups);
2482 }
2483
2484 /* Asynchronous version of wait_for_inferior. It is called by the
2485 event loop whenever a change of state is detected on the file
2486 descriptor corresponding to the target. It can be called more than
2487 once to complete a single execution command; the state that must
2488 persist across calls is kept in per-thread and global variables,
2489 not in the local ECSS. If this is the last call for the command, then
2490 report to the user that the inferior has stopped, and do the
2491 necessary cleanups. */
2492
2493 void
2494 fetch_inferior_event (void *client_data)
2495 {
2496 struct execution_control_state ecss;
2497 struct execution_control_state *ecs = &ecss;
2498 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2499 struct cleanup *ts_old_chain;
2500 int was_sync = sync_execution;
2501
2502 memset (ecs, 0, sizeof (*ecs));
2503
2504 /* We'll update this if & when we switch to a new thread. */
2505 previous_inferior_ptid = inferior_ptid;
2506
2507 if (non_stop)
2508 /* In non-stop mode, the user/frontend should not notice a thread
2509 switch due to internal events. Make sure we revert to the
2510 user-selected thread and frame after handling the event and
2511 running any breakpoint commands. */
2512 make_cleanup_restore_current_thread ();
2513
2514 /* We have to invalidate the registers BEFORE calling target_wait
2515 because they can be loaded from the target while in target_wait.
2516 This makes remote debugging a bit more efficient for those
2517 targets that provide critical registers as part of their normal
2518 status mechanism. */
2519
2520 overlay_cache_invalid = 1;
2521 registers_changed ();
2522
2523 if (deprecated_target_wait_hook)
2524 ecs->ptid =
2525 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2526 else
2527 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2528
2529 if (debug_infrun)
2530 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2531
2532 if (non_stop
2533 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2534 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2535 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2536 /* In non-stop mode, each thread is handled individually. Switch
2537 early, so the global state is set correctly for this
2538 thread. */
2539 context_switch (ecs->ptid);
2540
2541 /* If an error happens while handling the event, propagate GDB's
2542 knowledge of the executing state to the frontend/user running
2543 state. */
2544 if (!non_stop)
2545 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2546 else
2547 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2548
2549 /* Now figure out what to do with the result. */
2550 handle_inferior_event (ecs);
2551
2552 if (!ecs->wait_some_more)
2553 {
2554 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2555
2556 delete_step_thread_step_resume_breakpoint ();
2557
2558 /* We may not find an inferior if this was a process exit. */
2559 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2560 normal_stop ();
2561
2562 if (target_has_execution
2563 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2564 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2565 && ecs->event_thread->step_multi
2566 && ecs->event_thread->stop_step)
2567 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2568 else
2569 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2570 }
2571
2572 /* No error, don't finish the thread states yet. */
2573 discard_cleanups (ts_old_chain);
2574
2575 /* Revert thread and frame. */
2576 do_cleanups (old_chain);
2577
2578 /* If the inferior was in sync execution mode, and now isn't,
2579 restore the prompt. */
2580 if (was_sync && !sync_execution)
2581 display_gdb_prompt (0);
2582 }
2583
2584 /* Record the frame and location we're currently stepping through. */
2585 void
2586 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2587 {
2588 struct thread_info *tp = inferior_thread ();
2589
2590 tp->step_frame_id = get_frame_id (frame);
2591 tp->step_stack_frame_id = get_stack_frame_id (frame);
2592
2593 tp->current_symtab = sal.symtab;
2594 tp->current_line = sal.line;
2595 }
2596
2597 /* Clear context switchable stepping state. */
2598
2599 void
2600 init_thread_stepping_state (struct thread_info *tss)
2601 {
2602 tss->stepping_over_breakpoint = 0;
2603 tss->step_after_step_resume_breakpoint = 0;
2604 tss->stepping_through_solib_after_catch = 0;
2605 tss->stepping_through_solib_catchpoints = NULL;
2606 }
2607
2608 /* Return the cached copy of the last pid/waitstatus returned by
2609 target_wait()/deprecated_target_wait_hook(). The data is actually
2610 cached by handle_inferior_event(), which gets called immediately
2611 after target_wait()/deprecated_target_wait_hook(). */
2612
2613 void
2614 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2615 {
2616 *ptidp = target_last_wait_ptid;
2617 *status = target_last_waitstatus;
2618 }
2619
2620 void
2621 nullify_last_target_wait_ptid (void)
2622 {
2623 target_last_wait_ptid = minus_one_ptid;
2624 }
2625
2626 /* Switch thread contexts. */
2627
2628 static void
2629 context_switch (ptid_t ptid)
2630 {
2631 if (debug_infrun)
2632 {
2633 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2634 target_pid_to_str (inferior_ptid));
2635 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2636 target_pid_to_str (ptid));
2637 }
2638
2639 switch_to_thread (ptid);
2640 }
2641
2642 static void
2643 adjust_pc_after_break (struct execution_control_state *ecs)
2644 {
2645 struct regcache *regcache;
2646 struct gdbarch *gdbarch;
2647 struct address_space *aspace;
2648 CORE_ADDR breakpoint_pc;
2649
2650 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2651 we aren't, just return.
2652
2653 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2654 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2655 implemented by software breakpoints should be handled through the normal
2656 breakpoint layer.
2657
2658 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2659 different signals (SIGILL or SIGEMT for instance), but it is less
2660 clear where the PC is pointing afterwards. It may not match
2661 gdbarch_decr_pc_after_break. I don't know any specific target that
2662 generates these signals at breakpoints (the code has been in GDB since at
2663 least 1992) so I cannot guess how to handle them here.
2664
2665 In earlier versions of GDB, a target with
2666 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2667 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2668 target with both of these set in GDB history, and it seems unlikely to be
2669 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2670
2671 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2672 return;
2673
2674 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2675 return;
2676
2677 /* In reverse execution, when a breakpoint is hit, the instruction
2678 under it has already been de-executed. The reported PC always
2679 points at the breakpoint address, so adjusting it further would
2680 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2681 architecture:
2682
2683 B1 0x08000000 : INSN1
2684 B2 0x08000001 : INSN2
2685 0x08000002 : INSN3
2686 PC -> 0x08000003 : INSN4
2687
2688 Say you're stopped at 0x08000003 as above. Reverse continuing
2689 from that point should hit B2 as below. Reading the PC when the
2690 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2691 been de-executed already.
2692
2693 B1 0x08000000 : INSN1
2694 B2 PC -> 0x08000001 : INSN2
2695 0x08000002 : INSN3
2696 0x08000003 : INSN4
2697
2698 We can't apply the same logic as for forward execution, because
2699 we would wrongly adjust the PC to 0x08000000, since there's a
2700 breakpoint at PC - 1. We'd then report a hit on B1, although
2701 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2702 behaviour. */
2703 if (execution_direction == EXEC_REVERSE)
2704 return;
2705
2706 /* If this target does not decrement the PC after breakpoints, then
2707 we have nothing to do. */
2708 regcache = get_thread_regcache (ecs->ptid);
2709 gdbarch = get_regcache_arch (regcache);
2710 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2711 return;
2712
2713 aspace = get_regcache_aspace (regcache);
2714
2715 /* Find the location where (if we've hit a breakpoint) the
2716 breakpoint would be. */
2717 breakpoint_pc = regcache_read_pc (regcache)
2718 - gdbarch_decr_pc_after_break (gdbarch);
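  /* For example (editor's note): on i386 the breakpoint instruction is
     the one-byte "int3" and decr_pc_after_break is 1, so if the trap
     left the PC at 0x08048456 the breakpoint itself sits at
     0x08048455, which is what BREAKPOINT_PC computes here.  */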
2719
2720 /* Check whether there actually is a software breakpoint inserted at
2721 that location.
2722
2723 If in non-stop mode, a race condition is possible where we've
2724 removed a breakpoint, but stop events for that breakpoint were
2725 already queued and arrive later. To suppress those spurious
2726 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2727 and retire them after a number of stop events are reported. */
2728 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2729 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2730 {
2731 struct cleanup *old_cleanups = NULL;
2732
2733 if (RECORD_IS_USED)
2734 old_cleanups = record_gdb_operation_disable_set ();
2735
2736 /* When using hardware single-step, a SIGTRAP is reported for both
2737 a completed single-step and a software breakpoint. We need to
2738 differentiate between the two, as the latter needs adjusting
2739 but the former does not.
2740
2741 The SIGTRAP can be due to a completed hardware single-step only if
2742 - we didn't insert software single-step breakpoints
2743 - the thread to be examined is still the current thread
2744 - this thread is currently being stepped
2745
2746 If any of these conditions does not hold, we must have stopped due
2747 to hitting a software breakpoint, and have to back up to the
2748 breakpoint address.
2749
2750 As a special case, we could have hardware single-stepped a
2751 software breakpoint. In this case (prev_pc == breakpoint_pc),
2752 we also need to back up to the breakpoint address. */
2753
2754 if (singlestep_breakpoints_inserted_p
2755 || !ptid_equal (ecs->ptid, inferior_ptid)
2756 || !currently_stepping (ecs->event_thread)
2757 || ecs->event_thread->prev_pc == breakpoint_pc)
2758 regcache_write_pc (regcache, breakpoint_pc);
2759
2760 if (RECORD_IS_USED)
2761 do_cleanups (old_cleanups);
2762 }
2763 }
2764
2765 void
2766 init_infwait_state (void)
2767 {
2768 waiton_ptid = pid_to_ptid (-1);
2769 infwait_state = infwait_normal_state;
2770 }
2771
2772 void
2773 error_is_running (void)
2774 {
2775 error (_("\
2776 Cannot execute this command while the selected thread is running."));
2777 }
2778
2779 void
2780 ensure_not_running (void)
2781 {
2782 if (is_running (inferior_ptid))
2783 error_is_running ();
2784 }
2785
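/* Return non-zero if unwinding from FRAME reaches the frame whose id
   is STEP_FRAME_ID without leaving the chain of inlined frames (the
   first non-inline caller is the last frame checked).  */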
2786 static int
2787 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2788 {
2789 for (frame = get_prev_frame (frame);
2790 frame != NULL;
2791 frame = get_prev_frame (frame))
2792 {
2793 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2794 return 1;
2795 if (get_frame_type (frame) != INLINE_FRAME)
2796 break;
2797 }
2798
2799 return 0;
2800 }
2801
2802 /* Auxiliary function that handles syscall entry/return events.
2803 It returns 1 if the inferior should keep going (and GDB
2804 should ignore the event), or 0 if the event deserves to be
2805 processed. */
2806
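/* Editor's note (context, not in the original source): syscall
   entry/return events normally arise because the user has set a
   syscall catchpoint with the "catch syscall" command;
   catch_syscall_enabled () and catching_syscall_number () below then
   decide whether this particular syscall should cause a stop.  */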
2807 static int
2808 handle_syscall_event (struct execution_control_state *ecs)
2809 {
2810 struct regcache *regcache;
2811 struct gdbarch *gdbarch;
2812 int syscall_number;
2813
2814 if (!ptid_equal (ecs->ptid, inferior_ptid))
2815 context_switch (ecs->ptid);
2816
2817 regcache = get_thread_regcache (ecs->ptid);
2818 gdbarch = get_regcache_arch (regcache);
2819 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2820 stop_pc = regcache_read_pc (regcache);
2821
2822 target_last_waitstatus.value.syscall_number = syscall_number;
2823
2824 if (catch_syscall_enabled () > 0
2825 && catching_syscall_number (syscall_number) > 0)
2826 {
2827 if (debug_infrun)
2828 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2829 syscall_number);
2830
2831 ecs->event_thread->stop_bpstat
2832 = bpstat_stop_status (get_regcache_aspace (regcache),
2833 stop_pc, ecs->ptid);
2834 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2835
2836 if (!ecs->random_signal)
2837 {
2838 /* Catchpoint hit. */
2839 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2840 return 0;
2841 }
2842 }
2843
2844 /* If no catchpoint triggered for this, then keep going. */
2845 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2846 keep_going (ecs);
2847 return 1;
2848 }
2849
2850 /* Given an execution control state that has been freshly filled in
2851 by an event from the inferior, figure out what it means and take
2852 appropriate action. */
2853
2854 static void
2855 handle_inferior_event (struct execution_control_state *ecs)
2856 {
2857 struct frame_info *frame;
2858 struct gdbarch *gdbarch;
2859 int sw_single_step_trap_p = 0;
2860 int stopped_by_watchpoint;
2861 int stepped_after_stopped_by_watchpoint = 0;
2862 struct symtab_and_line stop_pc_sal;
2863 enum stop_kind stop_soon;
2864
2865 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2866 {
2867 /* We had an event in the inferior, but we are not interested in
2868 handling it at this level. The lower layers have already
2869 done what needs to be done, if anything.
2870
2871 One of the possible circumstances for this is when the
2872 inferior produces output for the console. The inferior has
2873 not stopped, and we are ignoring the event. Another possible
2874 circumstance is any event which the lower level knows will be
2875 reported multiple times without an intervening resume. */
2876 if (debug_infrun)
2877 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2878 prepare_to_wait (ecs);
2879 return;
2880 }
2881
2882 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2883 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2884 {
2885 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2886
2887 gdb_assert (inf);
2888 stop_soon = inf->stop_soon;
2889 }
2890 else
2891 stop_soon = NO_STOP_QUIETLY;
2892
2893 /* Cache the last pid/waitstatus. */
2894 target_last_wait_ptid = ecs->ptid;
2895 target_last_waitstatus = ecs->ws;
2896
2897 /* Always clear state belonging to the previous time we stopped. */
2898 stop_stack_dummy = STOP_NONE;
2899
2900 /* If it's a new process, add it to the thread database. */
2901
2902 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2903 && !ptid_equal (ecs->ptid, minus_one_ptid)
2904 && !in_thread_list (ecs->ptid));
2905
2906 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2907 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2908 add_thread (ecs->ptid);
2909
2910 ecs->event_thread = find_thread_ptid (ecs->ptid);
2911
2912 /* Dependent on valid ECS->EVENT_THREAD. */
2913 adjust_pc_after_break (ecs);
2914
2915 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2916 reinit_frame_cache ();
2917
2918 breakpoint_retire_moribund ();
2919
2920 /* First, distinguish signals caused by the debugger from signals
2921 that have to do with the program's own actions. Note that
2922 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
2923 on the operating system version. Here we detect when a SIGILL or
2924 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
2925 something similar for SIGSEGV, since a SIGSEGV will be generated
2926 when we're trying to execute a breakpoint instruction on a
2927 non-executable stack. This happens for call dummy breakpoints
2928 for architectures like SPARC that place call dummies on the
2929 stack. */
2930 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
2931 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
2932 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
2933 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
2934 {
2935 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2936
2937 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2938 regcache_read_pc (regcache)))
2939 {
2940 if (debug_infrun)
2941 fprintf_unfiltered (gdb_stdlog,
2942 "infrun: Treating signal as SIGTRAP\n");
2943 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2944 }
2945 }
2946
2947 /* Mark the non-executing threads accordingly. In all-stop, all
2948 threads of all processes are stopped when we get any event
2949 reported. In non-stop mode, only the event thread stops. If
2950 we're handling a process exit in non-stop mode, there's nothing
2951 to do, as threads of the dead process are gone, and threads of
2952 any other process were left running. */
2953 if (!non_stop)
2954 set_executing (minus_one_ptid, 0);
2955 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2956 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2957 set_executing (inferior_ptid, 0);
2958
2959 switch (infwait_state)
2960 {
2961 case infwait_thread_hop_state:
2962 if (debug_infrun)
2963 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2964 break;
2965
2966 case infwait_normal_state:
2967 if (debug_infrun)
2968 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2969 break;
2970
2971 case infwait_step_watch_state:
2972 if (debug_infrun)
2973 fprintf_unfiltered (gdb_stdlog,
2974 "infrun: infwait_step_watch_state\n");
2975
2976 stepped_after_stopped_by_watchpoint = 1;
2977 break;
2978
2979 case infwait_nonstep_watch_state:
2980 if (debug_infrun)
2981 fprintf_unfiltered (gdb_stdlog,
2982 "infrun: infwait_nonstep_watch_state\n");
2983 insert_breakpoints ();
2984
2985 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2986 handle things like signals arriving and other things happening
2987 in combination correctly? */
2988 stepped_after_stopped_by_watchpoint = 1;
2989 break;
2990
2991 default:
2992 internal_error (__FILE__, __LINE__, _("bad switch"));
2993 }
2994
2995 infwait_state = infwait_normal_state;
2996 waiton_ptid = pid_to_ptid (-1);
2997
2998 switch (ecs->ws.kind)
2999 {
3000 case TARGET_WAITKIND_LOADED:
3001 if (debug_infrun)
3002 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3003 /* Ignore gracefully during startup of the inferior, as it might
3004 be the shell which has just loaded some objects; otherwise,
3005 add the symbols for the newly loaded objects. Also ignore at
3006 the beginning of an attach or remote session; we will query
3007 the full list of libraries once the connection is
3008 established. */
3009 if (stop_soon == NO_STOP_QUIETLY)
3010 {
3011 /* Check for any newly added shared libraries if we're
3012 supposed to be adding them automatically. Switch
3013 terminal for any messages produced by
3014 breakpoint_re_set. */
3015 target_terminal_ours_for_output ();
3016 /* NOTE: cagney/2003-11-25: Make certain that the target
3017 stack's section table is kept up-to-date. Architectures,
3018 (e.g., PPC64), use the section table to perform
3019 operations such as address => section name and hence
3020 require the table to contain all sections (including
3021 those found in shared libraries). */
3022 #ifdef SOLIB_ADD
3023 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3024 #else
3025 solib_add (NULL, 0, &current_target, auto_solib_add);
3026 #endif
3027 target_terminal_inferior ();
3028
3029 /* If requested, stop when the dynamic linker notifies
3030 gdb of events. This allows the user to get control
3031 and place breakpoints in initializer routines for
3032 dynamically loaded objects (among other things). */
3033 if (stop_on_solib_events)
3034 {
3035 /* Make sure we print "Stopped due to solib-event" in
3036 normal_stop. */
3037 stop_print_frame = 1;
3038
3039 stop_stepping (ecs);
3040 return;
3041 }
3042
3043 /* NOTE drow/2007-05-11: This might be a good place to check
3044 for "catch load". */
3045 }
3046
3047 /* If we are skipping through a shell, or through shared library
3048 loading that we aren't interested in, resume the program. If
3049 we're running the program normally, also resume. But stop if
3050 we're attaching or setting up a remote connection. */
3051 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3052 {
3053 /* Loading of shared libraries might have changed breakpoint
3054 addresses. Make sure new breakpoints are inserted. */
3055 if (stop_soon == NO_STOP_QUIETLY
3056 && !breakpoints_always_inserted_mode ())
3057 insert_breakpoints ();
3058 resume (0, TARGET_SIGNAL_0);
3059 prepare_to_wait (ecs);
3060 return;
3061 }
3062
3063 break;
3064
3065 case TARGET_WAITKIND_SPURIOUS:
3066 if (debug_infrun)
3067 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3068 resume (0, TARGET_SIGNAL_0);
3069 prepare_to_wait (ecs);
3070 return;
3071
3072 case TARGET_WAITKIND_EXITED:
3073 if (debug_infrun)
3074 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3075 inferior_ptid = ecs->ptid;
3076 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3077 set_current_program_space (current_inferior ()->pspace);
3078 handle_vfork_child_exec_or_exit (0);
3079 target_terminal_ours (); /* Must do this before mourn anyway */
3080 print_stop_reason (EXITED, ecs->ws.value.integer);
3081
3082 /* Record the exit code in the convenience variable $_exitcode, so
3083 that the user can inspect this again later. */
3084 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3085 (LONGEST) ecs->ws.value.integer);
3086 gdb_flush (gdb_stdout);
3087 target_mourn_inferior ();
3088 singlestep_breakpoints_inserted_p = 0;
3089 stop_print_frame = 0;
3090 stop_stepping (ecs);
3091 return;
3092
3093 case TARGET_WAITKIND_SIGNALLED:
3094 if (debug_infrun)
3095 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3096 inferior_ptid = ecs->ptid;
3097 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3098 set_current_program_space (current_inferior ()->pspace);
3099 handle_vfork_child_exec_or_exit (0);
3100 stop_print_frame = 0;
3101 target_terminal_ours (); /* Must do this before mourn anyway */
3102
3103 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3104 reach here unless the inferior is dead. However, for years
3105 target_kill() was called here, which hints that fatal signals aren't
3106 really fatal on some systems. If that's true, then some changes
3107 may be needed. */
3108 target_mourn_inferior ();
3109
3110 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
3111 singlestep_breakpoints_inserted_p = 0;
3112 stop_stepping (ecs);
3113 return;
3114
3115 /* The following are the only cases in which we keep going;
3116 the above cases end in a continue or goto. */
3117 case TARGET_WAITKIND_FORKED:
3118 case TARGET_WAITKIND_VFORKED:
3119 if (debug_infrun)
3120 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3121
3122 if (!ptid_equal (ecs->ptid, inferior_ptid))
3123 {
3124 context_switch (ecs->ptid);
3125 reinit_frame_cache ();
3126 }
3127
3128 /* Immediately detach breakpoints from the child before there's
3129 any chance of letting the user delete breakpoints from the
3130 breakpoint lists. If we don't do this early, it's easy to
3131 leave leftover traps in the child, viz: "break foo; catch
3132 fork; c; <fork>; del; c; <child calls foo>". We only follow
3133 the fork on the last `continue', and by that time the
3134 breakpoint at "foo" is long gone from the breakpoint table.
3135 If we vforked, then we don't need to unpatch here, since both
3136 parent and child are sharing the same memory pages; we'll
3137 need to unpatch at follow/detach time instead to be certain
3138 that new breakpoints added between catchpoint hit time and
3139 vfork follow are detached. */
3140 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3141 {
3142 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3143
3144 /* This won't actually modify the breakpoint list, but will
3145 physically remove the breakpoints from the child. */
3146 detach_breakpoints (child_pid);
3147 }
3148
3149 /* In case the event is caught by a catchpoint, remember that
3150 the event is to be followed at the next resume of the thread,
3151 and not immediately. */
3152 ecs->event_thread->pending_follow = ecs->ws;
3153
3154 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3155
3156 ecs->event_thread->stop_bpstat
3157 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3158 stop_pc, ecs->ptid);
3159
3160 /* Note that we're interested in knowing whether the bpstat
3161 actually causes a stop, not just whether it may explain the signal.
3162 Software watchpoints, for example, always appear in the
3163 bpstat. */
3164 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3165
3166 /* If no catchpoint triggered for this, then keep going. */
3167 if (ecs->random_signal)
3168 {
3169 ptid_t parent;
3170 ptid_t child;
3171 int should_resume;
3172 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3173
3174 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3175
3176 should_resume = follow_fork ();
3177
3178 parent = ecs->ptid;
3179 child = ecs->ws.value.related_pid;
3180
3181 /* In non-stop mode, also resume the other branch. */
3182 if (non_stop && !detach_fork)
3183 {
3184 if (follow_child)
3185 switch_to_thread (parent);
3186 else
3187 switch_to_thread (child);
3188
3189 ecs->event_thread = inferior_thread ();
3190 ecs->ptid = inferior_ptid;
3191 keep_going (ecs);
3192 }
3193
3194 if (follow_child)
3195 switch_to_thread (child);
3196 else
3197 switch_to_thread (parent);
3198
3199 ecs->event_thread = inferior_thread ();
3200 ecs->ptid = inferior_ptid;
3201
3202 if (should_resume)
3203 keep_going (ecs);
3204 else
3205 stop_stepping (ecs);
3206 return;
3207 }
3208 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3209 goto process_event_stop_test;
3210
3211 case TARGET_WAITKIND_VFORK_DONE:
3212 /* Done with the shared memory region. Re-insert breakpoints in
3213 the parent, and keep going. */
3214
3215 if (debug_infrun)
3216 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3217
3218 if (!ptid_equal (ecs->ptid, inferior_ptid))
3219 context_switch (ecs->ptid);
3220
3221 current_inferior ()->waiting_for_vfork_done = 0;
3222 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3223 /* This also takes care of reinserting breakpoints in the
3224 previously locked inferior. */
3225 keep_going (ecs);
3226 return;
3227
3228 case TARGET_WAITKIND_EXECD:
3229 if (debug_infrun)
3230 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3231
3232 if (!ptid_equal (ecs->ptid, inferior_ptid))
3233 {
3234 context_switch (ecs->ptid);
3235 reinit_frame_cache ();
3236 }
3237
3238 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3239
3240 /* Do whatever is necessary to the parent branch of the vfork. */
3241 handle_vfork_child_exec_or_exit (1);
3242
3243 /* This causes the eventpoints and symbol table to be reset.
3244 Must do this now, before trying to determine whether to
3245 stop. */
3246 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3247
3248 ecs->event_thread->stop_bpstat
3249 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3250 stop_pc, ecs->ptid);
3251 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3252
3253 /* Note that this may be referenced from inside
3254 bpstat_stop_status above, through inferior_has_execd. */
3255 xfree (ecs->ws.value.execd_pathname);
3256 ecs->ws.value.execd_pathname = NULL;
3257
3258 /* If no catchpoint triggered for this, then keep going. */
3259 if (ecs->random_signal)
3260 {
3261 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3262 keep_going (ecs);
3263 return;
3264 }
3265 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3266 goto process_event_stop_test;
3267
3268 /* Be careful not to try to gather much state about a thread
3269 that's in a syscall. It's frequently a losing proposition. */
3270 case TARGET_WAITKIND_SYSCALL_ENTRY:
3271 if (debug_infrun)
3272 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3273 /* Get the current syscall number. */
3274 if (handle_syscall_event (ecs) != 0)
3275 return;
3276 goto process_event_stop_test;
3277
3278 /* Before examining the threads further, step this thread to
3279 get it entirely out of the syscall. (We get notice of the
3280 event when the thread is just on the verge of exiting a
3281 syscall. Stepping one instruction seems to get it back
3282 into user code.) */
3283 case TARGET_WAITKIND_SYSCALL_RETURN:
3284 if (debug_infrun)
3285 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3286 if (handle_syscall_event (ecs) != 0)
3287 return;
3288 goto process_event_stop_test;
3289
3290 case TARGET_WAITKIND_STOPPED:
3291 if (debug_infrun)
3292 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3293 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3294 break;
3295
3296 case TARGET_WAITKIND_NO_HISTORY:
3297 /* Reverse execution: target ran out of history info. */
3298 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3299 print_stop_reason (NO_HISTORY, 0);
3300 stop_stepping (ecs);
3301 return;
3302 }
3303
3304 if (ecs->new_thread_event)
3305 {
3306 if (non_stop)
3307 /* Non-stop assumes that the target handles adding new threads
3308 to the thread list. */
3309 internal_error (__FILE__, __LINE__, "\
3310 targets should add new threads to the thread list themselves in non-stop mode.");
3311
3312 /* We may want to consider not doing a resume here in order to
3313 give the user a chance to play with the new thread. It might
3314 be good to make that a user-settable option. */
3315
3316 /* At this point, all threads are stopped (happens automatically
3317 in either the OS or the native code). Therefore we need to
3318 continue all threads in order to make progress. */
3319
3320 if (!ptid_equal (ecs->ptid, inferior_ptid))
3321 context_switch (ecs->ptid);
3322 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3323 prepare_to_wait (ecs);
3324 return;
3325 }
3326
3327 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3328 {
3329 /* Do we need to clean up the state of a thread that has
3330 completed a displaced single-step? (Doing so usually affects
3331 the PC, so do it here, before we set stop_pc.) */
3332 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3333
3334 /* If we either finished a single-step or hit a breakpoint, but
3335 the user wanted this thread to be stopped, pretend we got a
3336 SIG0 (generic unsignaled stop). */
3337
3338 if (ecs->event_thread->stop_requested
3339 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3340 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3341 }
3342
3343 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3344
3345 if (debug_infrun)
3346 {
3347 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3348 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3349 struct cleanup *old_chain = save_inferior_ptid ();
3350
3351 inferior_ptid = ecs->ptid;
3352
3353 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3354 paddress (gdbarch, stop_pc));
3355 if (target_stopped_by_watchpoint ())
3356 {
3357 CORE_ADDR addr;
3358
3359 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3360
3361 if (target_stopped_data_address (&current_target, &addr))
3362 fprintf_unfiltered (gdb_stdlog,
3363 "infrun: stopped data address = %s\n",
3364 paddress (gdbarch, addr));
3365 else
3366 fprintf_unfiltered (gdb_stdlog,
3367 "infrun: (no data address available)\n");
3368 }
3369
3370 do_cleanups (old_chain);
3371 }
3372
3373 if (stepping_past_singlestep_breakpoint)
3374 {
3375 gdb_assert (singlestep_breakpoints_inserted_p);
3376 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3377 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3378
3379 stepping_past_singlestep_breakpoint = 0;
3380
3381 /* We've either finished single-stepping past the single-step
3382 breakpoint, or stopped for some other reason. It would be nice if
3383 we could tell, but we can't reliably. */
3384 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3385 {
3386 if (debug_infrun)
3387 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3388 /* Pull the single step breakpoints out of the target. */
3389 remove_single_step_breakpoints ();
3390 singlestep_breakpoints_inserted_p = 0;
3391
3392 ecs->random_signal = 0;
3393 ecs->event_thread->trap_expected = 0;
3394
3395 context_switch (saved_singlestep_ptid);
3396 if (deprecated_context_hook)
3397 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3398
3399 resume (1, TARGET_SIGNAL_0);
3400 prepare_to_wait (ecs);
3401 return;
3402 }
3403 }
3404
3405 if (!ptid_equal (deferred_step_ptid, null_ptid))
3406 {
3407 /* In non-stop mode, there's never a deferred_step_ptid set. */
3408 gdb_assert (!non_stop);
3409
3410 /* If we stopped for some other reason than single-stepping, ignore
3411 the fact that we were supposed to switch back. */
3412 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3413 {
3414 if (debug_infrun)
3415 fprintf_unfiltered (gdb_stdlog,
3416 "infrun: handling deferred step\n");
3417
3418 /* Pull the single step breakpoints out of the target. */
3419 if (singlestep_breakpoints_inserted_p)
3420 {
3421 remove_single_step_breakpoints ();
3422 singlestep_breakpoints_inserted_p = 0;
3423 }
3424
3425 /* Note: We do not call context_switch at this point, as the
3426 context is already set up for stepping the original thread. */
3427 switch_to_thread (deferred_step_ptid);
3428 deferred_step_ptid = null_ptid;
3429 /* Suppress spurious "Switching to ..." message. */
3430 previous_inferior_ptid = inferior_ptid;
3431
3432 resume (1, TARGET_SIGNAL_0);
3433 prepare_to_wait (ecs);
3434 return;
3435 }
3436
3437 deferred_step_ptid = null_ptid;
3438 }
3439
3440 /* See if a thread hit a thread-specific breakpoint that was meant for
3441 another thread. If so, then step that thread past the breakpoint,
3442 and continue it. */
3443
3444 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3445 {
3446 int thread_hop_needed = 0;
3447 struct address_space *aspace =
3448 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3449
3450 /* Check if a regular breakpoint has been hit before checking
3451 for a potential single step breakpoint. Otherwise, GDB will
3452 not see this breakpoint hit when stepping onto breakpoints. */
3453 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3454 {
3455 ecs->random_signal = 0;
3456 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3457 thread_hop_needed = 1;
3458 }
3459 else if (singlestep_breakpoints_inserted_p)
3460 {
3461 /* We have not context switched yet, so this should be true
3462 no matter which thread hit the singlestep breakpoint. */
3463 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3464 if (debug_infrun)
3465 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3466 "trap for %s\n",
3467 target_pid_to_str (ecs->ptid));
3468
3469 ecs->random_signal = 0;
3470 /* The call to in_thread_list is necessary because PTIDs sometimes
3471 change when we go from single-threaded to multi-threaded. If
3472 the singlestep_ptid is still in the list, assume that it is
3473 really different from ecs->ptid. */
3474 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3475 && in_thread_list (singlestep_ptid))
3476 {
3477 /* If the PC of the thread we were trying to single-step
3478 has changed, discard this event (which we were going
3479 to ignore anyway), and pretend we saw that thread
3480 trap. This prevents us continuously moving the
3481 single-step breakpoint forward, one instruction at a
3482 time. If the PC has changed, then the thread we were
3483 trying to single-step has trapped or been signalled,
3484 but the event has not been reported to GDB yet.
3485
3486 There might be some cases where this loses signal
3487 information, if a signal has arrived at exactly the
3488 same time that the PC changed, but this is the best
3489 we can do with the information available. Perhaps we
3490 should arrange to report all events for all threads
3491 when they stop, or to re-poll the remote looking for
3492 this particular thread (i.e. temporarily enable
3493 schedlock). */
3494
3495 CORE_ADDR new_singlestep_pc
3496 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3497
3498 if (new_singlestep_pc != singlestep_pc)
3499 {
3500 enum target_signal stop_signal;
3501
3502 if (debug_infrun)
3503 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3504 " but expected thread advanced also\n");
3505
3506 /* The current context still belongs to
3507 singlestep_ptid. Don't swap here, since that's
3508 the context we want to use. Just fudge our
3509 state and continue. */
3510 stop_signal = ecs->event_thread->stop_signal;
3511 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3512 ecs->ptid = singlestep_ptid;
3513 ecs->event_thread = find_thread_ptid (ecs->ptid);
3514 ecs->event_thread->stop_signal = stop_signal;
3515 stop_pc = new_singlestep_pc;
3516 }
3517 else
3518 {
3519 if (debug_infrun)
3520 fprintf_unfiltered (gdb_stdlog,
3521 "infrun: unexpected thread\n");
3522
3523 thread_hop_needed = 1;
3524 stepping_past_singlestep_breakpoint = 1;
3525 saved_singlestep_ptid = singlestep_ptid;
3526 }
3527 }
3528 }
3529
3530 if (thread_hop_needed)
3531 {
3532 struct regcache *thread_regcache;
3533 int remove_status = 0;
3534
3535 if (debug_infrun)
3536 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3537
3538 /* Switch context before touching inferior memory, the
3539 previous thread may have exited. */
3540 if (!ptid_equal (inferior_ptid, ecs->ptid))
3541 context_switch (ecs->ptid);
3542
3543 /* Saw a breakpoint, but it was hit by the wrong thread.
3544 Just continue. */
3545
3546 if (singlestep_breakpoints_inserted_p)
3547 {
3548 /* Pull the single step breakpoints out of the target. */
3549 remove_single_step_breakpoints ();
3550 singlestep_breakpoints_inserted_p = 0;
3551 }
3552
3553 /* If the arch can displace step, don't remove the
3554 breakpoints. */
3555 thread_regcache = get_thread_regcache (ecs->ptid);
3556 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3557 remove_status = remove_breakpoints ();
3558
3559 /* Did we fail to remove breakpoints? If so, try
3560 to set the PC past the bp. (There's at least
3561 one situation in which we can fail to remove
3562 the bp's: On HP-UX's that use ttrace, we can't
3563 change the address space of a vforking child
3564 process until the child exits (well, okay, not
3565 then either :-) or execs. */
3566 if (remove_status != 0)
3567 error (_("Cannot step over breakpoint hit in wrong thread"));
3568 else
3569 { /* Single step */
3570 if (!non_stop)
3571 {
3572 /* Only need to require the next event from this
3573 thread in all-stop mode. */
3574 waiton_ptid = ecs->ptid;
3575 infwait_state = infwait_thread_hop_state;
3576 }
3577
3578 ecs->event_thread->stepping_over_breakpoint = 1;
3579 keep_going (ecs);
3580 return;
3581 }
3582 }
3583 else if (singlestep_breakpoints_inserted_p)
3584 {
3585 sw_single_step_trap_p = 1;
3586 ecs->random_signal = 0;
3587 }
3588 }
3589 else
3590 ecs->random_signal = 1;
3591
3592 /* See if something interesting happened to the non-current thread. If
3593 so, then switch to that thread. */
3594 if (!ptid_equal (ecs->ptid, inferior_ptid))
3595 {
3596 if (debug_infrun)
3597 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3598
3599 context_switch (ecs->ptid);
3600
3601 if (deprecated_context_hook)
3602 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3603 }
3604
3605 /* At this point, get hold of the now-current thread's frame. */
3606 frame = get_current_frame ();
3607 gdbarch = get_frame_arch (frame);
3608
3609 if (singlestep_breakpoints_inserted_p)
3610 {
3611 /* Pull the single step breakpoints out of the target. */
3612 remove_single_step_breakpoints ();
3613 singlestep_breakpoints_inserted_p = 0;
3614 }
3615
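/* stepped_after_stopped_by_watchpoint means we already single-stepped
   the inferior past an instruction that triggered a watchpoint (see the
   infwait_step_watch_state handling); in that case this trap completes
   that step and must not be reported as a new watchpoint hit. */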
3616 if (stepped_after_stopped_by_watchpoint)
3617 stopped_by_watchpoint = 0;
3618 else
3619 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3620
3621 /* If necessary, step over this watchpoint. We'll be back to display
3622 it in a moment. */
3623 if (stopped_by_watchpoint
3624 && (target_have_steppable_watchpoint
3625 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3626 {
3627 /* At this point, we are stopped at an instruction which has
3628 attempted to write to a piece of memory under control of
3629 a watchpoint. The instruction hasn't actually executed
3630 yet. If we were to evaluate the watchpoint expression
3631 now, we would get the old value, and therefore no change
3632 would seem to have occurred.
3633
3634 In order to make watchpoints work `right', we really need
3635 to complete the memory write, and then evaluate the
3636 watchpoint expression. We do this by single-stepping the
3637 target.
3638
3639 It may not be necessary to disable the watchpoint to step over
3640 it. For example, the PA can (with some kernel cooperation)
3641 single step over a watchpoint without disabling the watchpoint.
3642
3643 It is far more common to need to disable a watchpoint to step
3644 the inferior over it. If we have non-steppable watchpoints,
3645 we must disable the current watchpoint; it's simplest to
3646 disable all watchpoints and breakpoints. */
3647 int hw_step = 1;
3648
3649 if (!target_have_steppable_watchpoint)
3650 remove_breakpoints ();
3651 /* Single step */
3652 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3653 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3654 waiton_ptid = ecs->ptid;
3655 if (target_have_steppable_watchpoint)
3656 infwait_state = infwait_step_watch_state;
3657 else
3658 infwait_state = infwait_nonstep_watch_state;
3659 prepare_to_wait (ecs);
3660 return;
3661 }
3662
3663 ecs->stop_func_start = 0;
3664 ecs->stop_func_end = 0;
3665 ecs->stop_func_name = 0;
3666 /* Don't care about return value; stop_func_start and stop_func_name
3667 will both be 0 if it doesn't work. */
3668 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3669 &ecs->stop_func_start, &ecs->stop_func_end);
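/* Account for any architecture-defined offset between a function
   symbol's address and its first genuine instruction, so that later
   comparisons against stop_pc are meaningful. */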
3670 ecs->stop_func_start
3671 += gdbarch_deprecated_function_start_offset (gdbarch);
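/* Reset the per-event state consulted by the stop tests below. */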
3672 ecs->event_thread->stepping_over_breakpoint = 0;
3673 bpstat_clear (&ecs->event_thread->stop_bpstat);
3674 ecs->event_thread->stop_step = 0;
3675 stop_print_frame = 1;
3676 ecs->random_signal = 0;
3677 stopped_by_random_signal = 0;
3678
3679 /* Hide inlined functions starting here, unless we just performed stepi or
3680 nexti. After stepi and nexti, always show the innermost frame (not any
3681 inline function call sites). */
3682 if (ecs->event_thread->step_range_end != 1)
3683 skip_inline_frames (ecs->ptid);
3684
3685 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3686 && ecs->event_thread->trap_expected
3687 && gdbarch_single_step_through_delay_p (gdbarch)
3688 && currently_stepping (ecs->event_thread))
3689 {
3690 /* We're trying to step off a breakpoint. Turns out that we're
3691 also on an instruction that needs to be stepped multiple
3692 times before it has been fully executed. E.g., architectures
3693 with a delay slot. It needs to be stepped twice, once for
3694 the instruction and once for the delay slot. */
3695 int step_through_delay
3696 = gdbarch_single_step_through_delay (gdbarch, frame);
3697
3698 if (debug_infrun && step_through_delay)
3699 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3700 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3701 {
3702 /* The user issued a continue when stopped at a breakpoint.
3703 Set up for another trap and get out of here. */
3704 ecs->event_thread->stepping_over_breakpoint = 1;
3705 keep_going (ecs);
3706 return;
3707 }
3708 else if (step_through_delay)
3709 {
3710 /* The user issued a step when stopped at a breakpoint.
3711 Maybe we should stop, maybe we should not - the delay
3712 slot *might* correspond to a line of source. In any
3713 case, don't decide that here, just set
3714 ecs->stepping_over_breakpoint, making sure we
3715 single-step again before breakpoints are re-inserted. */
3716 ecs->event_thread->stepping_over_breakpoint = 1;
3717 }
3718 }
3719
3720 /* Look at the cause of the stop, and decide what to do.
3721 The alternatives are:
3722 1) stop_stepping and return; to really stop and return to the debugger,
3723 2) keep_going and return to start up again
3724 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3725 3) set ecs->random_signal to 1, and the decision between 1 and 2
3726 will be made according to the signal handling tables. */
3727
3728 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3729 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3730 || stop_soon == STOP_QUIETLY_REMOTE)
3731 {
3732 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3733 {
3734 if (debug_infrun)
3735 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3736 stop_print_frame = 0;
3737 stop_stepping (ecs);
3738 return;
3739 }
3740
3741 /* This originates from start_remote(), start_inferior() and
3742 shared library hook functions. */
3743 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3744 {
3745 if (debug_infrun)
3746 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3747 stop_stepping (ecs);
3748 return;
3749 }
3750
3751 /* This originates from attach_command(). We need to overwrite
3752 the stop_signal here, because some kernels don't ignore a
3753 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3754 See more comments in inferior.h. On the other hand, if we
3755 get a non-SIGSTOP, report it to the user - assume the backend
3756 will handle the SIGSTOP if it should show up later.
3757
3758 Also consider that the attach is complete when we see a
3759 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3760 target extended-remote report it instead of a SIGSTOP
3761 (e.g. gdbserver). We already rely on SIGTRAP being our
3762 signal, so this is no exception.
3763
3764 Also consider that the attach is complete when we see a
3765 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3766 the target to stop all threads of the inferior, in case the
3767 low level attach operation doesn't stop them implicitly. If
3768 they weren't stopped implicitly, then the stub will report a
3769 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3770 other than GDB's request. */
3771 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3772 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3773 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3774 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3775 {
3776 stop_stepping (ecs);
3777 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3778 return;
3779 }
3780
3781 /* See if there is a breakpoint at the current PC. */
3782 ecs->event_thread->stop_bpstat
3783 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3784 stop_pc, ecs->ptid);
3785
3786 /* The following is needed in case a breakpoint condition
3787 called a function. */
3788 stop_print_frame = 1;
3789
3790 /* This is where we handle "moribund" watchpoints. Unlike
3791 software breakpoints traps, hardware watchpoint traps are
3792 always distinguishable from random traps. If no high-level
3793 watchpoint is associated with the reported stop data address
3794 anymore, then the bpstat does not explain the signal ---
3795 simply make sure to ignore it if `stopped_by_watchpoint' is
3796 set. */
3797
3798 if (debug_infrun
3799 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3800 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3801 && stopped_by_watchpoint)
3802 fprintf_unfiltered (gdb_stdlog, "\
3803 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3804
3805 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3806 at one stage in the past included checks for an inferior
3807 function call's call dummy's return breakpoint. The original
3808 comment, that went with the test, read:
3809
3810 ``End of a stack dummy. Some systems (e.g. Sony news) give
3811 another signal besides SIGTRAP, so check here as well as
3812 above.''
3813
3814 If someone ever tries to get call dummies on a
3815 non-executable stack to work (where the target would stop
3816 with something like a SIGSEGV), then those tests might need
3817 to be re-instated. Given, however, that the tests were only
3818 enabled when momentary breakpoints were not being used, I
3819 suspect that it won't be the case.
3820
3821 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3822 be necessary for call dummies on a non-executable stack on
3823 SPARC. */
3824
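/* A SIGTRAP is not "random" if a breakpoint explains the stop, a
   watchpoint triggered, we were expecting the trap from stepping over
   a breakpoint, or we are inside a stepping range using plain
   single-step (no step-resume breakpoint in place). */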
3825 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3826 ecs->random_signal
3827 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3828 || stopped_by_watchpoint
3829 || ecs->event_thread->trap_expected
3830 || (ecs->event_thread->step_range_end
3831 && ecs->event_thread->step_resume_breakpoint == NULL));
3832 else
3833 {
3834 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3835 if (!ecs->random_signal)
3836 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3837 }
3838 }
3839
3840 /* When we reach this point, we've pretty much decided
3841 that the reason for stopping must've been a random
3842 (unexpected) signal. */
3843
3844 else
3845 ecs->random_signal = 1;
3846
3847 process_event_stop_test:
3848
3849 /* Re-fetch current thread's frame in case we did a
3850 "goto process_event_stop_test" above. */
3851 frame = get_current_frame ();
3852 gdbarch = get_frame_arch (frame);
3853
3854 /* For the program's own signals, act according to
3855 the signal handling tables. */
3856
3857 if (ecs->random_signal)
3858 {
3859 /* Signal not for debugging purposes. */
3860 int printed = 0;
3861 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3862
3863 if (debug_infrun)
3864 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3865 ecs->event_thread->stop_signal);
3866
3867 stopped_by_random_signal = 1;
3868
3869 if (signal_print[ecs->event_thread->stop_signal])
3870 {
3871 printed = 1;
3872 target_terminal_ours_for_output ();
3873 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3874 }
3875 /* Always stop on signals if we're either just gaining control
3876 of the program, or the user explicitly requested this thread
3877 to remain stopped. */
3878 if (stop_soon != NO_STOP_QUIETLY
3879 || ecs->event_thread->stop_requested
3880 || (!inf->detaching
3881 && signal_stop_state (ecs->event_thread->stop_signal)))
3882 {
3883 stop_stepping (ecs);
3884 return;
3885 }
3886 /* If not going to stop, give terminal back
3887 if we took it away. */
3888 else if (printed)
3889 target_terminal_inferior ();
3890
3891 /* Clear the signal if it should not be passed. */
3892 if (signal_program[ecs->event_thread->stop_signal] == 0)
3893 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3894
3895 if (ecs->event_thread->prev_pc == stop_pc
3896 && ecs->event_thread->trap_expected
3897 && ecs->event_thread->step_resume_breakpoint == NULL)
3898 {
3899 /* We were just starting a new sequence, attempting to
3900 single-step off of a breakpoint and expecting a SIGTRAP.
3901 Instead this signal arrives. This signal will take us out
3902 of the stepping range so GDB needs to remember to, when
3903 the signal handler returns, resume stepping off that
3904 breakpoint. */
3905 /* To simplify things, "continue" is forced to use the same
3906 code paths as single-step - set a breakpoint at the
3907 signal return address and then, once hit, step off that
3908 breakpoint. */
3909 if (debug_infrun)
3910 fprintf_unfiltered (gdb_stdlog,
3911 "infrun: signal arrived while stepping over "
3912 "breakpoint\n");
3913
3914 insert_step_resume_breakpoint_at_frame (frame);
3915 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3916 keep_going (ecs);
3917 return;
3918 }
3919
3920 if (ecs->event_thread->step_range_end != 0
3921 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3922 && (ecs->event_thread->step_range_start <= stop_pc
3923 && stop_pc < ecs->event_thread->step_range_end)
3924 && frame_id_eq (get_stack_frame_id (frame),
3925 ecs->event_thread->step_stack_frame_id)
3926 && ecs->event_thread->step_resume_breakpoint == NULL)
3927 {
3928 /* The inferior is about to take a signal that will take it
3929 out of the single step range. Set a breakpoint at the
3930 current PC (which is presumably where the signal handler
3931 will eventually return) and then allow the inferior to
3932 run free.
3933
3934 Note that this is only needed for a signal delivered
3935 while in the single-step range. Nested signals aren't a
3936 problem as they eventually all return. */
3937 if (debug_infrun)
3938 fprintf_unfiltered (gdb_stdlog,
3939 "infrun: signal may take us out of "
3940 "single-step range\n");
3941
3942 insert_step_resume_breakpoint_at_frame (frame);
3943 keep_going (ecs);
3944 return;
3945 }
3946
3947 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3948 when either there's a nested signal, or when there's a
3949 pending signal enabled just as the signal handler returns
3950 (leaving the inferior at the step-resume-breakpoint without
3951 actually executing it). Either way continue until the
3952 breakpoint is really hit. */
3953 keep_going (ecs);
3954 return;
3955 }
3956
3957 /* Handle cases caused by hitting a breakpoint. */
3958 {
3959 CORE_ADDR jmp_buf_pc;
3960 struct bpstat_what what;
3961
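/* Ask the breakpoint machinery for the most significant action implied
   by the breakpoints (if any) that explain this stop; the switch below
   acts on that answer. */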
3962 what = bpstat_what (ecs->event_thread->stop_bpstat);
3963
3964 if (what.call_dummy)
3965 {
3966 stop_stack_dummy = what.call_dummy;
3967 }
3968
3969 switch (what.main_action)
3970 {
3971 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3972 /* If we hit the breakpoint at longjmp while stepping, we
3973 install a momentary breakpoint at the target of the
3974 jmp_buf. */
3975
3976 if (debug_infrun)
3977 fprintf_unfiltered (gdb_stdlog,
3978 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3979
3980 ecs->event_thread->stepping_over_breakpoint = 1;
3981
3982 if (!gdbarch_get_longjmp_target_p (gdbarch)
3983 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3984 {
3985 if (debug_infrun)
3986 fprintf_unfiltered (gdb_stdlog, "\
3987 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3988 keep_going (ecs);
3989 return;
3990 }
3991
3992 /* We're going to replace the current step-resume breakpoint
3993 with a longjmp-resume breakpoint. */
3994 delete_step_resume_breakpoint (ecs->event_thread);
3995
3996 /* Insert a breakpoint at resume address. */
3997 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3998
3999 keep_going (ecs);
4000 return;
4001
4002 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4003 if (debug_infrun)
4004 fprintf_unfiltered (gdb_stdlog,
4005 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4006
4007 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4008 delete_step_resume_breakpoint (ecs->event_thread);
4009
4010 ecs->event_thread->stop_step = 1;
4011 print_stop_reason (END_STEPPING_RANGE, 0);
4012 stop_stepping (ecs);
4013 return;
4014
4015 case BPSTAT_WHAT_SINGLE:
4016 if (debug_infrun)
4017 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4018 ecs->event_thread->stepping_over_breakpoint = 1;
4019 /* Still need to check other stuff, at least the case
4020 where we are stepping and step out of the right range. */
4021 break;
4022
4023 case BPSTAT_WHAT_STOP_NOISY:
4024 if (debug_infrun)
4025 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4026 stop_print_frame = 1;
4027
4028 /* We are about to nuke the step_resume_breakpoint via the
4029 cleanup chain, so no need to worry about it here. */
4030
4031 stop_stepping (ecs);
4032 return;
4033
4034 case BPSTAT_WHAT_STOP_SILENT:
4035 if (debug_infrun)
4036 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4037 stop_print_frame = 0;
4038
4039 /* We are about to nuke the step_resume_breakpoint via the
4040 cleanup chain, so no need to worry about it here. */
4041
4042 stop_stepping (ecs);
4043 return;
4044
4045 case BPSTAT_WHAT_STEP_RESUME:
4046 if (debug_infrun)
4047 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4048
4049 delete_step_resume_breakpoint (ecs->event_thread);
4050 if (ecs->event_thread->step_after_step_resume_breakpoint)
4051 {
4052 /* Back when the step-resume breakpoint was inserted, we
4053 were trying to single-step off a breakpoint. Go back
4054 to doing that. */
4055 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4056 ecs->event_thread->stepping_over_breakpoint = 1;
4057 keep_going (ecs);
4058 return;
4059 }
4060 if (stop_pc == ecs->stop_func_start
4061 && execution_direction == EXEC_REVERSE)
4062 {
4063 /* We are stepping over a function call in reverse, and
4064 just hit the step-resume breakpoint at the start
4065 address of the function. Go back to single-stepping,
4066 which should take us back to the function call. */
4067 ecs->event_thread->stepping_over_breakpoint = 1;
4068 keep_going (ecs);
4069 return;
4070 }
4071 break;
4072
4073 case BPSTAT_WHAT_CHECK_SHLIBS:
4074 {
4075 if (debug_infrun)
4076 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
4077
4078 /* Check for any newly added shared libraries if we're
4079 supposed to be adding them automatically. Switch
4080 terminal for any messages produced by
4081 breakpoint_re_set. */
4082 target_terminal_ours_for_output ();
4083 /* NOTE: cagney/2003-11-25: Make certain that the target
4084 stack's section table is kept up-to-date. Architectures,
4085 (e.g., PPC64), use the section table to perform
4086 operations such as address => section name and hence
4087 require the table to contain all sections (including
4088 those found in shared libraries). */
4089 #ifdef SOLIB_ADD
4090 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
4091 #else
4092 solib_add (NULL, 0, &current_target, auto_solib_add);
4093 #endif
4094 target_terminal_inferior ();
4095
4096 /* If requested, stop when the dynamic linker notifies
4097 gdb of events. This allows the user to get control
4098 and place breakpoints in initializer routines for
4099 dynamically loaded objects (among other things). */
4100 if (stop_on_solib_events || stop_stack_dummy)
4101 {
4102 stop_stepping (ecs);
4103 return;
4104 }
4105 else
4106 {
4107 /* We want to step over this breakpoint, then keep going. */
4108 ecs->event_thread->stepping_over_breakpoint = 1;
4109 break;
4110 }
4111 }
4112 break;
4113
4114 case BPSTAT_WHAT_CHECK_JIT:
4115 if (debug_infrun)
4116 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
4117
4118 /* Switch terminal for any messages produced by breakpoint_re_set. */
4119 target_terminal_ours_for_output ();
4120
4121 jit_event_handler (gdbarch);
4122
4123 target_terminal_inferior ();
4124
4125 /* We want to step over this breakpoint, then keep going. */
4126 ecs->event_thread->stepping_over_breakpoint = 1;
4127
4128 break;
4129
4130 case BPSTAT_WHAT_LAST:
4131 /* Not a real code, but listed here to shut up gcc -Wall. */
4132
4133 case BPSTAT_WHAT_KEEP_CHECKING:
4134 break;
4135 }
4136 }
4137
4138 /* We come here if we hit a breakpoint but should not
4139 stop for it. Possibly we also were stepping
4140 and should stop for that. So fall through and
4141 test for stepping. But, if not stepping,
4142 do not stop. */
4143
4144 /* In all-stop mode, if we're currently stepping but have stopped in
4145 some other thread, we need to switch back to the stepped thread. */
4146 if (!non_stop)
4147 {
4148 struct thread_info *tp;
4149
4150 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4151 ecs->event_thread);
4152 if (tp)
4153 {
4154 /* However, if the current thread is blocked on some internal
4155 breakpoint, and we simply need to step over that breakpoint
4156 to get it going again, do that first. */
4157 if ((ecs->event_thread->trap_expected
4158 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4159 || ecs->event_thread->stepping_over_breakpoint)
4160 {
4161 keep_going (ecs);
4162 return;
4163 }
4164
4165 /* If the stepping thread exited, then don't try to switch
4166 back and resume it, which could fail in several different
4167 ways depending on the target. Instead, just keep going.
4168
4169 We can find a stepping dead thread in the thread list in
4170 two cases:
4171
4172 - The target supports thread exit events, and when the
4173 target tries to delete the thread from the thread list,
4174 inferior_ptid pointed at the exiting thread. In such
4175 case, calling delete_thread does not really remove the
4176 thread from the list; instead, the thread is left listed,
4177 with 'exited' state.
4178
4179 - The target's debug interface does not support thread
4180 exit events, and so we have no idea whatsoever if the
4181 previously stepping thread is still alive. For that
4182 reason, we need to synchronously query the target
4183 now. */
4184 if (is_exited (tp->ptid)
4185 || !target_thread_alive (tp->ptid))
4186 {
4187 if (debug_infrun)
4188 fprintf_unfiltered (gdb_stdlog, "\
4189 infrun: not switching back to stepped thread, it has vanished\n");
4190
4191 delete_thread (tp->ptid);
4192 keep_going (ecs);
4193 return;
4194 }
4195
4196 /* Otherwise, we no longer expect a trap in the current thread.
4197 Clear the trap_expected flag before switching back -- this is
4198 what keep_going would do as well, if we called it. */
4199 ecs->event_thread->trap_expected = 0;
4200
4201 if (debug_infrun)
4202 fprintf_unfiltered (gdb_stdlog,
4203 "infrun: switching back to stepped thread\n");
4204
4205 ecs->event_thread = tp;
4206 ecs->ptid = tp->ptid;
4207 context_switch (ecs->ptid);
4208 keep_going (ecs);
4209 return;
4210 }
4211 }
4212
4213 /* Are we stepping to get the inferior out of the dynamic linker's
4214 hook (and possibly the dld itself) after catching a shlib
4215 event? */
4216 if (ecs->event_thread->stepping_through_solib_after_catch)
4217 {
4218 #if defined(SOLIB_ADD)
4219 /* Have we reached our destination? If not, keep going. */
4220 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4221 {
4222 if (debug_infrun)
4223 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4224 ecs->event_thread->stepping_over_breakpoint = 1;
4225 keep_going (ecs);
4226 return;
4227 }
4228 #endif
4229 if (debug_infrun)
4230 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4231 /* Else, stop and report the catchpoint(s) whose triggering
4232 caused us to begin stepping. */
4233 ecs->event_thread->stepping_through_solib_after_catch = 0;
4234 bpstat_clear (&ecs->event_thread->stop_bpstat);
4235 ecs->event_thread->stop_bpstat
4236 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4237 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4238 stop_print_frame = 1;
4239 stop_stepping (ecs);
4240 return;
4241 }
4242
4243 if (ecs->event_thread->step_resume_breakpoint)
4244 {
4245 if (debug_infrun)
4246 fprintf_unfiltered (gdb_stdlog,
4247 "infrun: step-resume breakpoint is inserted\n");
4248
4249 /* Having a step-resume breakpoint overrides anything
4250 else having to do with stepping commands until
4251 that breakpoint is reached. */
4252 keep_going (ecs);
4253 return;
4254 }
4255
4256 if (ecs->event_thread->step_range_end == 0)
4257 {
4258 if (debug_infrun)
4259 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4260 /* Likewise if we aren't even stepping. */
4261 keep_going (ecs);
4262 return;
4263 }
4264
4265 /* Re-fetch current thread's frame in case the code above caused
4266 the frame cache to be re-initialized, making our FRAME variable
4267 a dangling pointer. */
4268 frame = get_current_frame ();
4269
4270 /* If stepping through a line, keep going if still within it.
4271
4272 Note that step_range_end is the address of the first instruction
4273 beyond the step range, and NOT the address of the last instruction
4274 within it!
4275
4276 Note also that during reverse execution, we may be stepping
4277 through a function epilogue and therefore must detect when
4278 the current-frame changes in the middle of a line. */
4279
4280 if (stop_pc >= ecs->event_thread->step_range_start
4281 && stop_pc < ecs->event_thread->step_range_end
4282 && (execution_direction != EXEC_REVERSE
4283 || frame_id_eq (get_frame_id (frame),
4284 ecs->event_thread->step_frame_id)))
4285 {
4286 if (debug_infrun)
4287 fprintf_unfiltered
4288 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4289 paddress (gdbarch, ecs->event_thread->step_range_start),
4290 paddress (gdbarch, ecs->event_thread->step_range_end));
4291
4292 /* When stepping backward, stop at beginning of line range
4293 (unless it's the function entry point, in which case
4294 keep going back to the call point). */
4295 if (stop_pc == ecs->event_thread->step_range_start
4296 && stop_pc != ecs->stop_func_start
4297 && execution_direction == EXEC_REVERSE)
4298 {
4299 ecs->event_thread->stop_step = 1;
4300 print_stop_reason (END_STEPPING_RANGE, 0);
4301 stop_stepping (ecs);
4302 }
4303 else
4304 keep_going (ecs);
4305
4306 return;
4307 }
4308
4309 /* We stepped out of the stepping range. */
4310
4311 /* If we are stepping at the source level and entered the runtime
4312 loader dynamic symbol resolution code...
4313
4314 EXEC_FORWARD: we keep on single stepping until we exit the run
4315 time loader code and reach the callee's address.
4316
4317 EXEC_REVERSE: we've already executed the callee (backward), and
4318 the runtime loader code is handled just like any other
4319 undebuggable function call. Now we need only keep stepping
4320 backward through the trampoline code, and that's handled further
4321 down, so there is nothing for us to do here. */
4322
4323 if (execution_direction != EXEC_REVERSE
4324 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4325 && in_solib_dynsym_resolve_code (stop_pc))
4326 {
4327 CORE_ADDR pc_after_resolver =
4328 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4329
4330 if (debug_infrun)
4331 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4332
4333 if (pc_after_resolver)
4334 {
4335 /* Set up a step-resume breakpoint at the address
4336 indicated by SKIP_SOLIB_RESOLVER. */
4337 struct symtab_and_line sr_sal;
4338
4339 init_sal (&sr_sal);
4340 sr_sal.pc = pc_after_resolver;
4341 sr_sal.pspace = get_frame_program_space (frame);
4342
4343 insert_step_resume_breakpoint_at_sal (gdbarch,
4344 sr_sal, null_frame_id);
4345 }
4346
4347 keep_going (ecs);
4348 return;
4349 }
4350
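/* A step_range_end of 1 means stepi/nexti; only for source-level "step"
   and "next" do we single-step our way through a signal trampoline. */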
4351 if (ecs->event_thread->step_range_end != 1
4352 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4353 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4354 && get_frame_type (frame) == SIGTRAMP_FRAME)
4355 {
4356 if (debug_infrun)
4357 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4358 /* The inferior, while doing a "step" or "next", has ended up in
4359 a signal trampoline (either by a signal being delivered or by
4360 the signal handler returning). Just single-step until the
4361 inferior leaves the trampoline (either by calling the handler
4362 or returning). */
4363 keep_going (ecs);
4364 return;
4365 }
4366
4367 /* Check for subroutine calls. The check for the current frame
4368 equalling the step ID is not necessary - the check of the
4369 previous frame's ID is sufficient - but it is a common case and
4370 cheaper than checking the previous frame's ID.
4371
4372 NOTE: frame_id_eq will never report two invalid frame IDs as
4373 being equal, so to get into this block, both the current and
4374 previous frame must have valid frame IDs. */
4375 /* The outer_frame_id check is a heuristic to detect stepping
4376 through startup code. If we step over an instruction which
4377 sets the stack pointer from an invalid value to a valid value,
4378 we may detect that as a subroutine call from the mythical
4379 "outermost" function. This could be fixed by marking
4380 outermost frames as !stack_p,code_p,special_p. Then the
4381 initial outermost frame, before sp was valid, would
4382 have code_addr == &_start. See the comment in frame_id_eq
4383 for more. */
4384 if (!frame_id_eq (get_stack_frame_id (frame),
4385 ecs->event_thread->step_stack_frame_id)
4386 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4387 ecs->event_thread->step_stack_frame_id)
4388 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4389 outer_frame_id)
4390 || step_start_function != find_pc_function (stop_pc))))
4391 {
4392 CORE_ADDR real_stop_pc;
4393
4394 if (debug_infrun)
4395 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4396
4397 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4398 || ((ecs->event_thread->step_range_end == 1)
4399 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4400 ecs->stop_func_start)))
4401 {
4402 /* I presume that step_over_calls is only 0 when we're
4403 supposed to be stepping at the assembly language level
4404 ("stepi"). Just stop. */
4405 /* Also, maybe we just did a "nexti" inside a prolog, so we
4406 thought it was a subroutine call but it was not. Stop as
4407 well. FENN */
4408 /* And this works the same backward as frontward. MVS */
4409 ecs->event_thread->stop_step = 1;
4410 print_stop_reason (END_STEPPING_RANGE, 0);
4411 stop_stepping (ecs);
4412 return;
4413 }
4414
4415 /* Reverse stepping through solib trampolines. */
4416
4417 if (execution_direction == EXEC_REVERSE
4418 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4419 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4420 || (ecs->stop_func_start == 0
4421 && in_solib_dynsym_resolve_code (stop_pc))))
4422 {
4423 /* Any solib trampoline code can be handled in reverse
4424 by simply continuing to single-step. We have already
4425 executed the solib function (backwards), and a few
4426 steps will take us back through the trampoline to the
4427 caller. */
4428 keep_going (ecs);
4429 return;
4430 }
4431
4432 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4433 {
4434 /* We're doing a "next".
4435
4436 Normal (forward) execution: set a breakpoint at the
4437 callee's return address (the address at which the caller
4438 will resume).
4439
4440 Reverse (backward) execution: set the step-resume
4441 breakpoint at the start of the function that we just
4442 stepped into (backwards), and continue to there. When we
4443 get there, we'll need to single-step back to the caller. */
4444
4445 if (execution_direction == EXEC_REVERSE)
4446 {
4447 struct symtab_and_line sr_sal;
4448
4449 /* Normal function call return (static or dynamic). */
4450 init_sal (&sr_sal);
4451 sr_sal.pc = ecs->stop_func_start;
4452 sr_sal.pspace = get_frame_program_space (frame);
4453 insert_step_resume_breakpoint_at_sal (gdbarch,
4454 sr_sal, null_frame_id);
4455 }
4456 else
4457 insert_step_resume_breakpoint_at_caller (frame);
4458
4459 keep_going (ecs);
4460 return;
4461 }
4462
4463 /* If we are in a function call trampoline (a stub between the
4464 calling routine and the real function), locate the real
4465 function. That's what tells us (a) whether we want to step
4466 into it at all, and (b) what prologue we want to run to the
4467 end of, if we do step into it. */
4468 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4469 if (real_stop_pc == 0)
4470 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4471 if (real_stop_pc != 0)
4472 ecs->stop_func_start = real_stop_pc;
4473
4474 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4475 {
4476 struct symtab_and_line sr_sal;
4477
4478 init_sal (&sr_sal);
4479 sr_sal.pc = ecs->stop_func_start;
4480 sr_sal.pspace = get_frame_program_space (frame);
4481
4482 insert_step_resume_breakpoint_at_sal (gdbarch,
4483 sr_sal, null_frame_id);
4484 keep_going (ecs);
4485 return;
4486 }
4487
4488 /* If we have line number information for the function we are
4489 thinking of stepping into, step into it.
4490
4491 If there are several symtabs at that PC (e.g. with include
4492 files), just want to know whether *any* of them have line
4493 numbers. find_pc_line handles this. */
4494 {
4495 struct symtab_and_line tmp_sal;
4496
4497 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4498 tmp_sal.pspace = get_frame_program_space (frame);
4499 if (tmp_sal.line != 0)
4500 {
4501 if (execution_direction == EXEC_REVERSE)
4502 handle_step_into_function_backward (gdbarch, ecs);
4503 else
4504 handle_step_into_function (gdbarch, ecs);
4505 return;
4506 }
4507 }
4508
4509 /* If we have no line number and the step-stop-if-no-debug is
4510 set, we stop the step so that the user has a chance to switch
4511 to assembly mode. */
4512 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4513 && step_stop_if_no_debug)
4514 {
4515 ecs->event_thread->stop_step = 1;
4516 print_stop_reason (END_STEPPING_RANGE, 0);
4517 stop_stepping (ecs);
4518 return;
4519 }
4520
4521 if (execution_direction == EXEC_REVERSE)
4522 {
4523 /* Set a breakpoint at callee's start address.
4524 From there we can step once and be back in the caller. */
4525 struct symtab_and_line sr_sal;
4526
4527 init_sal (&sr_sal);
4528 sr_sal.pc = ecs->stop_func_start;
4529 sr_sal.pspace = get_frame_program_space (frame);
4530 insert_step_resume_breakpoint_at_sal (gdbarch,
4531 sr_sal, null_frame_id);
4532 }
4533 else
4534 /* Set a breakpoint at callee's return address (the address
4535 at which the caller will resume). */
4536 insert_step_resume_breakpoint_at_caller (frame);
4537
4538 keep_going (ecs);
4539 return;
4540 }
4541
4542 /* Reverse stepping through solib trampolines. */
4543
4544 if (execution_direction == EXEC_REVERSE
4545 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4546 {
4547 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4548 || (ecs->stop_func_start == 0
4549 && in_solib_dynsym_resolve_code (stop_pc)))
4550 {
4551 /* Any solib trampoline code can be handled in reverse
4552 by simply continuing to single-step. We have already
4553 executed the solib function (backwards), and a few
4554 steps will take us back through the trampoline to the
4555 caller. */
4556 keep_going (ecs);
4557 return;
4558 }
4559 else if (in_solib_dynsym_resolve_code (stop_pc))
4560 {
4561 /* Stepped backward into the solib dynsym resolver.
4562 Set a breakpoint at its start and continue, then
4563 one more step will take us out. */
4564 struct symtab_and_line sr_sal;
4565
4566 init_sal (&sr_sal);
4567 sr_sal.pc = ecs->stop_func_start;
4568 sr_sal.pspace = get_frame_program_space (frame);
4569 insert_step_resume_breakpoint_at_sal (gdbarch,
4570 sr_sal, null_frame_id);
4571 keep_going (ecs);
4572 return;
4573 }
4574 }
4575
4576 /* If we're in the return path from a shared library trampoline,
4577 we want to proceed through the trampoline when stepping. */
4578 if (gdbarch_in_solib_return_trampoline (gdbarch,
4579 stop_pc, ecs->stop_func_name))
4580 {
4581 /* Determine where this trampoline returns. */
4582 CORE_ADDR real_stop_pc;
4583
4584 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4585
4586 if (debug_infrun)
4587 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4588
4589 /* Only proceed through if we know where it's going. */
4590 if (real_stop_pc)
4591 {
4592 /* And put the step-breakpoint there and go until there. */
4593 struct symtab_and_line sr_sal;
4594
4595 init_sal (&sr_sal); /* initialize to zeroes */
4596 sr_sal.pc = real_stop_pc;
4597 sr_sal.section = find_pc_overlay (sr_sal.pc);
4598 sr_sal.pspace = get_frame_program_space (frame);
4599
4600 /* Do not specify what the fp should be when we stop since
4601 on some machines the prologue is where the new fp value
4602 is established. */
4603 insert_step_resume_breakpoint_at_sal (gdbarch,
4604 sr_sal, null_frame_id);
4605
4606 /* Restart without fiddling with the step ranges or
4607 other state. */
4608 keep_going (ecs);
4609 return;
4610 }
4611 }
4612
4613 stop_pc_sal = find_pc_line (stop_pc, 0);
4614
4615 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4616 the trampoline processing logic; however, there are some trampolines
4617 that have no names, so we should do trampoline handling first. */
4618 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4619 && ecs->stop_func_name == NULL
4620 && stop_pc_sal.line == 0)
4621 {
4622 if (debug_infrun)
4623 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4624
4625 /* The inferior just stepped into, or returned to, an
4626 undebuggable function (where there is no debugging information
4627 and no line number corresponding to the address where the
4628 inferior stopped). Since we want to skip this kind of code,
4629 we keep going until the inferior returns from this
4630 function - unless the user has asked us not to (via
4631 set step-mode) or we no longer know how to get back
4632 to the call site. */
4633 if (step_stop_if_no_debug
4634 || !frame_id_p (frame_unwind_caller_id (frame)))
4635 {
4636 /* If we have no line number and the step-stop-if-no-debug
4637 is set, we stop the step so that the user has a chance to
4638 switch to assembly mode. */
4639 ecs->event_thread->stop_step = 1;
4640 print_stop_reason (END_STEPPING_RANGE, 0);
4641 stop_stepping (ecs);
4642 return;
4643 }
4644 else
4645 {
4646 /* Set a breakpoint at callee's return address (the address
4647 at which the caller will resume). */
4648 insert_step_resume_breakpoint_at_caller (frame);
4649 keep_going (ecs);
4650 return;
4651 }
4652 }
4653
4654 if (ecs->event_thread->step_range_end == 1)
4655 {
4656 /* It is stepi or nexti. We always want to stop stepping after
4657 one instruction. */
4658 if (debug_infrun)
4659 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4660 ecs->event_thread->stop_step = 1;
4661 print_stop_reason (END_STEPPING_RANGE, 0);
4662 stop_stepping (ecs);
4663 return;
4664 }
4665
4666 if (stop_pc_sal.line == 0)
4667 {
4668 /* We have no line number information. That means to stop
4669 stepping (does this always happen right after one instruction,
4670 when we do "s" in a function with no line numbers,
4671 or can this happen as a result of a return or longjmp?). */
4672 if (debug_infrun)
4673 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4674 ecs->event_thread->stop_step = 1;
4675 print_stop_reason (END_STEPPING_RANGE, 0);
4676 stop_stepping (ecs);
4677 return;
4678 }
4679
4680 /* Look for "calls" to inlined functions, part one. If the inline
4681 frame machinery detected some skipped call sites, we have entered
4682 a new inline function. */
4683
4684 if (frame_id_eq (get_frame_id (get_current_frame ()),
4685 ecs->event_thread->step_frame_id)
4686 && inline_skipped_frames (ecs->ptid))
4687 {
4688 struct symtab_and_line call_sal;
4689
4690 if (debug_infrun)
4691 fprintf_unfiltered (gdb_stdlog,
4692 "infrun: stepped into inlined function\n");
4693
4694 find_frame_sal (get_current_frame (), &call_sal);
4695
4696 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4697 {
4698 /* For "step", we're going to stop. But if the call site
4699 for this inlined function is on the same source line as
4700 we were previously stepping, go down into the function
4701 first. Otherwise stop at the call site. */
4702
4703 if (call_sal.line == ecs->event_thread->current_line
4704 && call_sal.symtab == ecs->event_thread->current_symtab)
4705 step_into_inline_frame (ecs->ptid);
4706
4707 ecs->event_thread->stop_step = 1;
4708 print_stop_reason (END_STEPPING_RANGE, 0);
4709 stop_stepping (ecs);
4710 return;
4711 }
4712 else
4713 {
4714 /* For "next", we should stop at the call site if it is on a
4715 different source line. Otherwise continue through the
4716 inlined function. */
4717 if (call_sal.line == ecs->event_thread->current_line
4718 && call_sal.symtab == ecs->event_thread->current_symtab)
4719 keep_going (ecs);
4720 else
4721 {
4722 ecs->event_thread->stop_step = 1;
4723 print_stop_reason (END_STEPPING_RANGE, 0);
4724 stop_stepping (ecs);
4725 }
4726 return;
4727 }
4728 }
4729
4730 /* Look for "calls" to inlined functions, part two. If we are still
4731 in the same real function we were stepping through, but we have
4732 to go further up to find the exact frame ID, we are stepping
4733 through a more inlined call beyond its call site. */
4734
4735 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4736 && !frame_id_eq (get_frame_id (get_current_frame ()),
4737 ecs->event_thread->step_frame_id)
4738 && stepped_in_from (get_current_frame (),
4739 ecs->event_thread->step_frame_id))
4740 {
4741 if (debug_infrun)
4742 fprintf_unfiltered (gdb_stdlog,
4743 "infrun: stepping through inlined function\n");
4744
4745 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4746 keep_going (ecs);
4747 else
4748 {
4749 ecs->event_thread->stop_step = 1;
4750 print_stop_reason (END_STEPPING_RANGE, 0);
4751 stop_stepping (ecs);
4752 }
4753 return;
4754 }
4755
4756 if ((stop_pc == stop_pc_sal.pc)
4757 && (ecs->event_thread->current_line != stop_pc_sal.line
4758 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4759 {
4760 /* We are at the start of a different line. So stop. Note that
4761 we don't stop if we step into the middle of a different line.
4762 That is said to make things like for (;;) statements work
4763 better. */
4764 if (debug_infrun)
4765 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4766 ecs->event_thread->stop_step = 1;
4767 print_stop_reason (END_STEPPING_RANGE, 0);
4768 stop_stepping (ecs);
4769 return;
4770 }
4771
4772 /* We aren't done stepping.
4773
4774 Optimize by setting the stepping range to the line.
4775 (We might not be in the original line, but if we entered a
4776 new line in mid-statement, we continue stepping. This makes
4777 things like for(;;) statements work better.) */
4778
4779 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4780 ecs->event_thread->step_range_end = stop_pc_sal.end;
4781 set_step_info (frame, stop_pc_sal);
4782
4783 if (debug_infrun)
4784 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4785 keep_going (ecs);
4786 }
4787
4788 /* Is thread TP in the middle of single-stepping? */
4789
4790 static int
4791 currently_stepping (struct thread_info *tp)
4792 {
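/* A thread is stepping if it has an active step range that has not
   been handed off to a step-resume breakpoint, if it expects a trap
   from stepping over a breakpoint, if it is stepping out of the
   dynamic linker after a shlib-event catchpoint, or if
   bpstat_should_step reports that a software watchpoint requires
   single-stepping. */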
4793 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4794 || tp->trap_expected
4795 || tp->stepping_through_solib_after_catch
4796 || bpstat_should_step ());
4797 }
4798
4799 /* Returns true if any thread *but* the one passed in "data" is in the
4800 middle of stepping or of handling a "next". */
4801
4802 static int
4803 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4804 {
4805 if (tp == data)
4806 return 0;
4807
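/* Unlike currently_stepping, a thread with a pending step-resume
   breakpoint still counts here: a "next" over a call is in progress
   until that breakpoint is hit. */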
4808 return (tp->step_range_end
4809 || tp->trap_expected
4810 || tp->stepping_through_solib_after_catch);
4811 }
4812
4813 /* Inferior has stepped into a subroutine call with source code that
4814 we should not step over. Do step to the first line of code in
4815 it. */
4816
4817 static void
4818 handle_step_into_function (struct gdbarch *gdbarch,
4819 struct execution_control_state *ecs)
4820 {
4821 struct symtab *s;
4822 struct symtab_and_line stop_func_sal, sr_sal;
4823
4824 s = find_pc_symtab (stop_pc);
4825 if (s && s->language != language_asm)
4826 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4827 ecs->stop_func_start);
4828
4829 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4830 /* Use the step_resume_break to step until the end of the prologue,
4831 even if that involves jumps (as it seems to on the vax under
4832 4.2). */
4833 /* If the prologue ends in the middle of a source line, continue to
4834 the end of that source line (if it is still within the function).
4835 Otherwise, just go to end of prologue. */
4836 if (stop_func_sal.end
4837 && stop_func_sal.pc != ecs->stop_func_start
4838 && stop_func_sal.end < ecs->stop_func_end)
4839 ecs->stop_func_start = stop_func_sal.end;
4840
4841 /* Architectures which require breakpoint adjustment might not be able
4842 to place a breakpoint at the computed address. If so, the test
4843 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4844 ecs->stop_func_start to an address at which a breakpoint may be
4845 legitimately placed.
4846
4847 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4848 made, GDB will enter an infinite loop when stepping through
4849 optimized code consisting of VLIW instructions which contain
4850 subinstructions corresponding to different source lines. On
4851 FR-V, it's not permitted to place a breakpoint on any but the
4852 first subinstruction of a VLIW instruction. When a breakpoint is
4853 set, GDB will adjust the breakpoint address to the beginning of
4854 the VLIW instruction. Thus, we need to make the corresponding
4855 adjustment here when computing the stop address. */
4856
4857 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4858 {
4859 ecs->stop_func_start
4860 = gdbarch_adjust_breakpoint_address (gdbarch,
4861 ecs->stop_func_start);
4862 }
4863
4864 if (ecs->stop_func_start == stop_pc)
4865 {
4866 /* We are already there: stop now. */
4867 ecs->event_thread->stop_step = 1;
4868 print_stop_reason (END_STEPPING_RANGE, 0);
4869 stop_stepping (ecs);
4870 return;
4871 }
4872 else
4873 {
4874 /* Put the step-breakpoint there and go until there. */
4875 init_sal (&sr_sal); /* initialize to zeroes */
4876 sr_sal.pc = ecs->stop_func_start;
4877 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4878 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4879
4880 /* Do not specify what the fp should be when we stop since on
4881 some machines the prologue is where the new fp value is
4882 established. */
4883 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4884
4885 /* And make sure stepping stops right away then. */
4886 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4887 }
4888 keep_going (ecs);
4889 }
4890
4891 /* Inferior has stepped backward into a subroutine call with source
4892 code that we should not step over. Do step to the beginning of the
4893 last line of code in it. */
4894
4895 static void
4896 handle_step_into_function_backward (struct gdbarch *gdbarch,
4897 struct execution_control_state *ecs)
4898 {
4899 struct symtab *s;
4900 struct symtab_and_line stop_func_sal;
4901
4902 s = find_pc_symtab (stop_pc);
4903 if (s && s->language != language_asm)
4904 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4905 ecs->stop_func_start);
4906
4907 stop_func_sal = find_pc_line (stop_pc, 0);
4908
4909 /* OK, we're just going to keep stepping here. */
4910 if (stop_func_sal.pc == stop_pc)
4911 {
4912 /* We're there already. Just stop stepping now. */
4913 ecs->event_thread->stop_step = 1;
4914 print_stop_reason (END_STEPPING_RANGE, 0);
4915 stop_stepping (ecs);
4916 }
4917 else
4918 {
4919 /* Else just reset the step range and keep going.
4920 Don't use a step-resume breakpoint: they don't work for
4921 epilogues, which can have multiple entry paths. */
4922 ecs->event_thread->step_range_start = stop_func_sal.pc;
4923 ecs->event_thread->step_range_end = stop_func_sal.end;
4924 keep_going (ecs);
4925 }
4926 return;
4927 }
4928
4929 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4930 This is used both to step into functions and to skip over code. */
4931
4932 static void
4933 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4934 struct symtab_and_line sr_sal,
4935 struct frame_id sr_id)
4936 {
4937 /* There should never be more than one step-resume or longjmp-resume
4938 breakpoint per thread, so we should never be setting a new
4939 step_resume_breakpoint when one is already active. */
4940 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4941
4942 if (debug_infrun)
4943 fprintf_unfiltered (gdb_stdlog,
4944 "infrun: inserting step-resume breakpoint at %s\n",
4945 paddress (gdbarch, sr_sal.pc));
4946
4947 inferior_thread ()->step_resume_breakpoint
4948 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4949 }
4950
4951 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4952 to skip a potential signal handler.
4953
4954 This is called with the interrupted function's frame. The signal
4955 handler, when it returns, will resume the interrupted function at
4956 RETURN_FRAME.pc. */
4957
4958 static void
4959 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4960 {
4961 struct symtab_and_line sr_sal;
4962 struct gdbarch *gdbarch;
4963
4964 gdb_assert (return_frame != NULL);
4965 init_sal (&sr_sal); /* initialize to zeros */
4966
4967 gdbarch = get_frame_arch (return_frame);
4968 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4969 sr_sal.section = find_pc_overlay (sr_sal.pc);
4970 sr_sal.pspace = get_frame_program_space (return_frame);
4971
4972 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4973 get_stack_frame_id (return_frame));
4974 }
4975
4976 /* Similar to insert_step_resume_breakpoint_at_frame, except it
4977 sets a breakpoint at the previous frame's PC. This is used to
4978 skip a function after stepping into it (for "next" or if the called
4979 function has no debugging information).
4980
4981 The current function has almost always been reached by single
4982 stepping a call or return instruction. NEXT_FRAME belongs to the
4983 current function, and the breakpoint will be set at the caller's
4984 resume address.
4985
4986 This is a separate function rather than reusing
4987 insert_step_resume_breakpoint_at_frame in order to avoid
4988 get_prev_frame, which may stop prematurely (see the implementation
4989 of frame_unwind_caller_id for an example). */
4990
4991 static void
4992 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4993 {
4994 struct symtab_and_line sr_sal;
4995 struct gdbarch *gdbarch;
4996
4997 /* We shouldn't have gotten here if we don't know where the call site
4998 is. */
4999 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5000
5001 init_sal (&sr_sal); /* initialize to zeros */
5002
5003 gdbarch = frame_unwind_caller_arch (next_frame);
5004 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5005 frame_unwind_caller_pc (next_frame));
5006 sr_sal.section = find_pc_overlay (sr_sal.pc);
5007 sr_sal.pspace = frame_unwind_program_space (next_frame);
5008
5009 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5010 frame_unwind_caller_id (next_frame));
5011 }
5012
5013 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5014 new breakpoint at the target of a jmp_buf. The handling of
5015 longjmp-resume uses the same mechanisms used for handling
5016 "step-resume" breakpoints. */
5017
5018 static void
5019 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5020 {
5021 /* There should never be more than one step-resume or longjmp-resume
5022 breakpoint per thread, so we should never be setting a new
5023 longjmp_resume_breakpoint when one is already active. */
5024 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5025
5026 if (debug_infrun)
5027 fprintf_unfiltered (gdb_stdlog,
5028 "infrun: inserting longjmp-resume breakpoint at %s\n",
5029 paddress (gdbarch, pc));
5030
5031 inferior_thread ()->step_resume_breakpoint =
5032 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5033 }
5034
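/* Tell the event loop that we no longer want to wait for the inferior:
   clear ECS->wait_some_more so control returns to the caller.  */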
5035 static void
5036 stop_stepping (struct execution_control_state *ecs)
5037 {
5038 if (debug_infrun)
5039 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5040
5041 /* Let callers know we don't want to wait for the inferior anymore. */
5042 ecs->wait_some_more = 0;
5043 }
5044
5045 /* This function handles various cases where we need to continue
5046 waiting for the inferior. */
5047 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5048
5049 static void
5050 keep_going (struct execution_control_state *ecs)
5051 {
5052 /* Make sure normal_stop is called if we get a QUIT handled before
5053 reaching resume. */
5054 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5055
5056 /* Save the pc before execution, to compare with pc after stop. */
5057 ecs->event_thread->prev_pc
5058 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5059
5060 /* If we get here, it means we should keep running the
5061 inferior and not return to the debugger. */
5062
5063 if (ecs->event_thread->trap_expected
5064 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5065 {
5066 /* We took a signal (which we are supposed to pass through to
5067 the inferior, else we'd not get here) and we haven't yet
5068 gotten our trap. Simply continue. */
5069
5070 discard_cleanups (old_cleanups);
5071 resume (currently_stepping (ecs->event_thread),
5072 ecs->event_thread->stop_signal);
5073 }
5074 else
5075 {
5076 /* Either the trap was not expected, but we are continuing
5077 anyway (the user asked that this signal be passed to the
5078 child)
5079 -- or --
5080 The signal was SIGTRAP, e.g. it was our signal, but we
5081 decided we should resume from it.
5082
5083 We're going to run this baby now!
5084
5085 Note that insert_breakpoints won't try to re-insert
5086 already inserted breakpoints. Therefore, we don't
5087 care if breakpoints were already inserted, or not. */
5088
5089 if (ecs->event_thread->stepping_over_breakpoint)
5090 {
5091 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5092
5093 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5094 /* Since we can't do a displaced step, we have to remove
5095 the breakpoint while we step it. To keep things
5096 simple, we remove them all. */
5097 remove_breakpoints ();
5098 }
5099 else
5100 {
5101 struct gdb_exception e;
5102
5103 /* Stop stepping when inserting breakpoints
5104 has failed. */
5105 TRY_CATCH (e, RETURN_MASK_ERROR)
5106 {
5107 insert_breakpoints ();
5108 }
5109 if (e.reason < 0)
5110 {
5111 exception_print (gdb_stderr, e);
5112 stop_stepping (ecs);
5113 return;
5114 }
5115 }
5116
5117 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5118
5119 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5120 specifies that such a signal should be delivered to the
5121 target program).
5122
5123 Typically, this would occur when a user is debugging a
5124 target monitor on a simulator: the target monitor sets a
5125 breakpoint; the simulator encounters this break-point and
5126 halts the simulation, handing control to GDB; GDB, noting
5127 that the break-point isn't valid, returns control back to the
5128 simulator; the simulator then delivers the hardware
5129 equivalent of a SIGNAL_TRAP to the program being debugged. */
5130
5131 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5132 && !signal_program[ecs->event_thread->stop_signal])
5133 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5134
5135 discard_cleanups (old_cleanups);
5136 resume (currently_stepping (ecs->event_thread),
5137 ecs->event_thread->stop_signal);
5138 }
5139
5140 prepare_to_wait (ecs);
5141 }
5142
5143 /* This function normally comes after a resume, before
5144 handle_inferior_event exits. It takes care of any last bits of
5145 housekeeping, and sets the all-important wait_some_more flag. */
5146
5147 static void
5148 prepare_to_wait (struct execution_control_state *ecs)
5149 {
5150 if (debug_infrun)
5151 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5152
5153 /* This is the old end of the while loop. Let everybody know we
5154 want to wait for the inferior some more and get called again
5155 soon. */
5156 ecs->wait_some_more = 1;
5157 }
5158
5159 /* Print why the inferior has stopped. We always print something when
5160 the inferior exits, or receives a signal. The rest of the cases are
5161 dealt with later on in normal_stop() and print_it_typical(). Ideally
5162 there should be a call to this function from handle_inferior_event()
5163 each time stop_stepping() is called. */
5164 static void
5165 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5166 {
5167 switch (stop_reason)
5168 {
5169 case END_STEPPING_RANGE:
5170 /* We are done with a step/next/si/ni command. */
5171 /* For now print nothing. */
5172 /* Print a message only if not in the middle of doing a "step n"
5173 operation for n > 1 */
5174 if (!inferior_thread ()->step_multi
5175 || !inferior_thread ()->stop_step)
5176 if (ui_out_is_mi_like_p (uiout))
5177 ui_out_field_string
5178 (uiout, "reason",
5179 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5180 break;
5181 case SIGNAL_EXITED:
5182 /* The inferior was terminated by a signal. */
5183 annotate_signalled ();
5184 if (ui_out_is_mi_like_p (uiout))
5185 ui_out_field_string
5186 (uiout, "reason",
5187 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5188 ui_out_text (uiout, "\nProgram terminated with signal ");
5189 annotate_signal_name ();
5190 ui_out_field_string (uiout, "signal-name",
5191 target_signal_to_name (stop_info));
5192 annotate_signal_name_end ();
5193 ui_out_text (uiout, ", ");
5194 annotate_signal_string ();
5195 ui_out_field_string (uiout, "signal-meaning",
5196 target_signal_to_string (stop_info));
5197 annotate_signal_string_end ();
5198 ui_out_text (uiout, ".\n");
5199 ui_out_text (uiout, "The program no longer exists.\n");
5200 break;
5201 case EXITED:
5202 /* The inferior program is finished. */
5203 annotate_exited (stop_info);
5204 if (stop_info)
5205 {
5206 if (ui_out_is_mi_like_p (uiout))
5207 ui_out_field_string (uiout, "reason",
5208 async_reason_lookup (EXEC_ASYNC_EXITED));
5209 ui_out_text (uiout, "\nProgram exited with code ");
5210 ui_out_field_fmt (uiout, "exit-code", "0%o",
5211 (unsigned int) stop_info);
5212 ui_out_text (uiout, ".\n");
5213 }
5214 else
5215 {
5216 if (ui_out_is_mi_like_p (uiout))
5217 ui_out_field_string
5218 (uiout, "reason",
5219 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5220 ui_out_text (uiout, "\nProgram exited normally.\n");
5221 }
5222 /* Support the --return-child-result option. */
5223 return_child_result_value = stop_info;
5224 break;
5225 case SIGNAL_RECEIVED:
5226 /* Signal received. The signal table tells us to print about
5227 it. */
5228 annotate_signal ();
5229
5230 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5231 {
5232 struct thread_info *t = inferior_thread ();
5233
5234 ui_out_text (uiout, "\n[");
5235 ui_out_field_string (uiout, "thread-name",
5236 target_pid_to_str (t->ptid));
5237 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5238 ui_out_text (uiout, " stopped");
5239 }
5240 else
5241 {
5242 ui_out_text (uiout, "\nProgram received signal ");
5243 annotate_signal_name ();
5244 if (ui_out_is_mi_like_p (uiout))
5245 ui_out_field_string
5246 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5247 ui_out_field_string (uiout, "signal-name",
5248 target_signal_to_name (stop_info));
5249 annotate_signal_name_end ();
5250 ui_out_text (uiout, ", ");
5251 annotate_signal_string ();
5252 ui_out_field_string (uiout, "signal-meaning",
5253 target_signal_to_string (stop_info));
5254 annotate_signal_string_end ();
5255 }
5256 ui_out_text (uiout, ".\n");
5257 break;
5258 case NO_HISTORY:
5259 /* Reverse execution: target ran out of history info. */
5260 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5261 break;
5262 default:
5263 internal_error (__FILE__, __LINE__,
5264 _("print_stop_reason: unrecognized enum value"));
5265 break;
5266 }
5267 }
5268 \f
5269
5270 /* Here to return control to GDB when the inferior stops for real.
5271 Print appropriate messages, remove breakpoints, give terminal our modes.
5272
5273 STOP_PRINT_FRAME nonzero means print the executing frame
5274 (pc, function, args, file, line number and line text).
5275 BREAKPOINTS_FAILED nonzero means stop was due to error
5276 attempting to insert breakpoints. */
5277
5278 void
5279 normal_stop (void)
5280 {
5281 struct target_waitstatus last;
5282 ptid_t last_ptid;
5283 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5284
5285 get_last_target_status (&last_ptid, &last);
5286
5287 /* If an exception is thrown from this point on, make sure to
5288 propagate GDB's knowledge of the executing state to the
5289 frontend/user running state. A QUIT is an easy exception to see
5290 here, so do this before any filtered output. */
5291 if (!non_stop)
5292 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5293 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5294 && last.kind != TARGET_WAITKIND_EXITED)
5295 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5296
5297 /* In non-stop mode, we don't want GDB to switch threads behind the
5298 user's back, to avoid races where the user is typing a command to
5299 apply to thread x, but GDB switches to thread y before the user
5300 finishes entering the command. */
5301
5302 /* As with the notification of thread events, we want to delay
5303 notifying the user that we've switched thread context until
5304 the inferior actually stops.
5305
5306 There's no point in saying anything if the inferior has exited.
5307 Note that SIGNALLED here means "exited with a signal", not
5308 "received a signal". */
5309 if (!non_stop
5310 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5311 && target_has_execution
5312 && last.kind != TARGET_WAITKIND_SIGNALLED
5313 && last.kind != TARGET_WAITKIND_EXITED)
5314 {
5315 target_terminal_ours_for_output ();
5316 printf_filtered (_("[Switching to %s]\n"),
5317 target_pid_to_str (inferior_ptid));
5318 annotate_thread_changed ();
5319 previous_inferior_ptid = inferior_ptid;
5320 }
5321
5322 if (!breakpoints_always_inserted_mode () && target_has_execution)
5323 {
5324 if (remove_breakpoints ())
5325 {
5326 target_terminal_ours_for_output ();
5327 printf_filtered (_("\
5328 Cannot remove breakpoints because program is no longer writable.\n\
5329 Further execution is probably impossible.\n"));
5330 }
5331 }
5332
5333 /* If an auto-display called a function and that got a signal,
5334 delete that auto-display to avoid an infinite recursion. */
5335
5336 if (stopped_by_random_signal)
5337 disable_current_display ();
5338
5339 /* Don't print a message if in the middle of doing a "step n"
5340 operation for n > 1 */
5341 if (target_has_execution
5342 && last.kind != TARGET_WAITKIND_SIGNALLED
5343 && last.kind != TARGET_WAITKIND_EXITED
5344 && inferior_thread ()->step_multi
5345 && inferior_thread ()->stop_step)
5346 goto done;
5347
5348 target_terminal_ours ();
5349
5350 /* Set the current source location. This will also happen if we
5351 display the frame below, but the current SAL will be incorrect
5352 during a user hook-stop function. */
5353 if (has_stack_frames () && !stop_stack_dummy)
5354 set_current_sal_from_frame (get_current_frame (), 1);
5355
5356 /* Let the user/frontend see the threads as stopped. */
5357 do_cleanups (old_chain);
5358
5359 /* Look up the hook_stop and run it (CLI internally handles problem
5360 of stop_command's pre-hook not existing). */
5361 if (stop_command)
5362 catch_errors (hook_stop_stub, stop_command,
5363 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5364
5365 if (!has_stack_frames ())
5366 goto done;
5367
5368 if (last.kind == TARGET_WAITKIND_SIGNALLED
5369 || last.kind == TARGET_WAITKIND_EXITED)
5370 goto done;
5371
5372 /* Select innermost stack frame - i.e., current frame is frame 0,
5373 and current location is based on that.
5374 Don't do this on return from a stack dummy routine,
5375 or if the program has exited. */
5376
5377 if (!stop_stack_dummy)
5378 {
5379 select_frame (get_current_frame ());
5380
5381 /* Print current location without a level number, if
5382 we have changed functions or hit a breakpoint.
5383 Print source line if we have one.
5384 bpstat_print() contains the logic deciding in detail
5385 what to print, based on the event(s) that just occurred. */
5386
5387 /* If --batch-silent is enabled then there's no need to print the current
5388 source location, and trying to do so risks causing an error message about
5389 missing source files. */
5390 if (stop_print_frame && !batch_silent)
5391 {
5392 int bpstat_ret;
5393 int source_flag;
5394 int do_frame_printing = 1;
5395 struct thread_info *tp = inferior_thread ();
5396
5397 bpstat_ret = bpstat_print (tp->stop_bpstat);
5398 switch (bpstat_ret)
5399 {
5400 case PRINT_UNKNOWN:
5401 /* If we had hit a shared library event breakpoint,
5402 bpstat_print would print out this message. If we hit
5403 an OS-level shared library event, do the same
5404 thing. */
5405 if (last.kind == TARGET_WAITKIND_LOADED)
5406 {
5407 printf_filtered (_("Stopped due to shared library event\n"));
5408 source_flag = SRC_LINE; /* something bogus */
5409 do_frame_printing = 0;
5410 break;
5411 }
5412
5413 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or should)
5414 carry around the function, and frame comparison does (or should)
5415 use it, the explicit function check below may be redundant. */
5416 if (tp->stop_step
5417 && frame_id_eq (tp->step_frame_id,
5418 get_frame_id (get_current_frame ()))
5419 && step_start_function == find_pc_function (stop_pc))
5420 source_flag = SRC_LINE; /* finished step, just print source line */
5421 else
5422 source_flag = SRC_AND_LOC; /* print location and source line */
5423 break;
5424 case PRINT_SRC_AND_LOC:
5425 source_flag = SRC_AND_LOC; /* print location and source line */
5426 break;
5427 case PRINT_SRC_ONLY:
5428 source_flag = SRC_LINE;
5429 break;
5430 case PRINT_NOTHING:
5431 source_flag = SRC_LINE; /* something bogus */
5432 do_frame_printing = 0;
5433 break;
5434 default:
5435 internal_error (__FILE__, __LINE__, _("Unknown value."));
5436 }
5437
5438 /* The behavior of this routine with respect to the source
5439 flag is:
5440 SRC_LINE: Print only source line
5441 LOCATION: Print only location
5442 SRC_AND_LOC: Print location and source line */
5443 if (do_frame_printing)
5444 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5445
5446 /* Display the auto-display expressions. */
5447 do_displays ();
5448 }
5449 }
5450
5451 /* Save the function value return registers, if we care.
5452 We might be about to restore their previous contents. */
5453 if (inferior_thread ()->proceed_to_finish)
5454 {
5455 /* This should not be necessary. */
5456 if (stop_registers)
5457 regcache_xfree (stop_registers);
5458
5459 /* NB: The copy goes through to the target picking up the value of
5460 all the registers. */
5461 stop_registers = regcache_dup (get_current_regcache ());
5462 }
5463
5464 if (stop_stack_dummy == STOP_STACK_DUMMY)
5465 {
5466 /* Pop the empty frame that contains the stack dummy.
5467 This also restores inferior state prior to the call
5468 (struct inferior_thread_state). */
5469 struct frame_info *frame = get_current_frame ();
5470
5471 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5472 frame_pop (frame);
5473 /* frame_pop() calls reinit_frame_cache as the last thing it does
5474 which means there's currently no selected frame. We don't need
5475 to re-establish a selected frame if the dummy call returns normally,
5476 that will be done by restore_inferior_status. However, we do have
5477 to handle the case where the dummy call is returning after being
5478 stopped (e.g. the dummy call previously hit a breakpoint). We
5479 can't know which case we have so just always re-establish a
5480 selected frame here. */
5481 select_frame (get_current_frame ());
5482 }
5483
5484 done:
5485 annotate_stopped ();
5486
5487 /* Suppress the stop observer if we're in the middle of:
5488
5489 - a step n (n > 1), as there are still more steps to be done.
5490
5491 - a "finish" command, as the observer will be called in
5492 finish_command_continuation, so it can include the inferior
5493 function's return value.
5494
5495 - calling an inferior function, as we pretend the inferior didn't
5496 run at all. The return value of the call is handled by the
5497 expression evaluator, through call_function_by_hand. */
5498
5499 if (!target_has_execution
5500 || last.kind == TARGET_WAITKIND_SIGNALLED
5501 || last.kind == TARGET_WAITKIND_EXITED
5502 || (!inferior_thread ()->step_multi
5503 && !(inferior_thread ()->stop_bpstat
5504 && inferior_thread ()->proceed_to_finish)
5505 && !inferior_thread ()->in_infcall))
5506 {
5507 if (!ptid_equal (inferior_ptid, null_ptid))
5508 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5509 stop_print_frame);
5510 else
5511 observer_notify_normal_stop (NULL, stop_print_frame);
5512 }
5513
5514 if (target_has_execution)
5515 {
5516 if (last.kind != TARGET_WAITKIND_SIGNALLED
5517 && last.kind != TARGET_WAITKIND_EXITED)
5518 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5519 Delete any breakpoint that is to be deleted at the next stop. */
5520 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5521 }
5522
5523 /* Try to get rid of automatically added inferiors that are no
5524 longer needed. Keeping those around slows down things linearly.
5525 Note that this never removes the current inferior. */
5526 prune_inferiors ();
5527 }
5528
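/* Stub for catch_errors: run the commands attached to the "stop"
   command's pre-hook.  CMD is the "stop" command's cmd_list_element.  */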
5529 static int
5530 hook_stop_stub (void *cmd)
5531 {
5532 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5533 return (0);
5534 }
5535 \f
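/* Query and update routines for the signal_stop, signal_print and
   signal_program tables, indexed by target signal number.  The
   *_update routines return the previous setting.  */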
5536 int
5537 signal_stop_state (int signo)
5538 {
5539 return signal_stop[signo];
5540 }
5541
5542 int
5543 signal_print_state (int signo)
5544 {
5545 return signal_print[signo];
5546 }
5547
5548 int
5549 signal_pass_state (int signo)
5550 {
5551 return signal_program[signo];
5552 }
5553
5554 int
5555 signal_stop_update (int signo, int state)
5556 {
5557 int ret = signal_stop[signo];
5558
5559 signal_stop[signo] = state;
5560 return ret;
5561 }
5562
5563 int
5564 signal_print_update (int signo, int state)
5565 {
5566 int ret = signal_print[signo];
5567
5568 signal_print[signo] = state;
5569 return ret;
5570 }
5571
5572 int
5573 signal_pass_update (int signo, int state)
5574 {
5575 int ret = signal_program[signo];
5576
5577 signal_program[signo] = state;
5578 return ret;
5579 }
5580
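/* Print the header line of the signal table shown by the "handle"
   and "info signals" commands.  */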
5581 static void
5582 sig_print_header (void)
5583 {
5584 printf_filtered (_("\
5585 Signal Stop\tPrint\tPass to program\tDescription\n"));
5586 }
5587
5588 static void
5589 sig_print_info (enum target_signal oursig)
5590 {
5591 const char *name = target_signal_to_name (oursig);
5592 int name_padding = 13 - strlen (name);
5593
5594 if (name_padding <= 0)
5595 name_padding = 0;
5596
5597 printf_filtered ("%s", name);
5598 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5599 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5600 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5601 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5602 printf_filtered ("%s\n", target_signal_to_string (oursig));
5603 }
5604
5605 /* Specify how various signals in the inferior should be handled. */
5606
5607 static void
5608 handle_command (char *args, int from_tty)
5609 {
5610 char **argv;
5611 int digits, wordlen;
5612 int sigfirst, signum, siglast;
5613 enum target_signal oursig;
5614 int allsigs;
5615 int nsigs;
5616 unsigned char *sigs;
5617 struct cleanup *old_chain;
5618
5619 if (args == NULL)
5620 {
5621 error_no_arg (_("signal to handle"));
5622 }
5623
5624 /* Allocate and zero an array of flags for which signals to handle. */
5625
5626 nsigs = (int) TARGET_SIGNAL_LAST;
5627 sigs = (unsigned char *) alloca (nsigs);
5628 memset (sigs, 0, nsigs);
5629
5630 /* Break the command line up into args. */
5631
5632 argv = gdb_buildargv (args);
5633 old_chain = make_cleanup_freeargv (argv);
5634
5635 /* Walk through the args, looking for signal oursigs, signal names, and
5636 actions. Signal numbers and signal names may be interspersed with
5637 actions, with the actions being performed for all signals cumulatively
5638 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5639
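/* For example, "handle SIGUSR1 SIGUSR2 nostop noprint pass" applies all
   three actions to both signals, and "handle 1-5 print" uses a numeric
   range in GDB's internal signal numbering.  */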
5640 while (*argv != NULL)
5641 {
5642 wordlen = strlen (*argv);
5643 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5644 {;
5645 }
5646 allsigs = 0;
5647 sigfirst = siglast = -1;
5648
5649 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5650 {
5651 /* Apply action to all signals except those used by the
5652 debugger. Silently skip those. */
5653 allsigs = 1;
5654 sigfirst = 0;
5655 siglast = nsigs - 1;
5656 }
5657 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5658 {
5659 SET_SIGS (nsigs, sigs, signal_stop);
5660 SET_SIGS (nsigs, sigs, signal_print);
5661 }
5662 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5663 {
5664 UNSET_SIGS (nsigs, sigs, signal_program);
5665 }
5666 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5667 {
5668 SET_SIGS (nsigs, sigs, signal_print);
5669 }
5670 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5671 {
5672 SET_SIGS (nsigs, sigs, signal_program);
5673 }
5674 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5675 {
5676 UNSET_SIGS (nsigs, sigs, signal_stop);
5677 }
5678 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5679 {
5680 SET_SIGS (nsigs, sigs, signal_program);
5681 }
5682 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5683 {
5684 UNSET_SIGS (nsigs, sigs, signal_print);
5685 UNSET_SIGS (nsigs, sigs, signal_stop);
5686 }
5687 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5688 {
5689 UNSET_SIGS (nsigs, sigs, signal_program);
5690 }
5691 else if (digits > 0)
5692 {
5693 /* It is numeric. The numeric signal refers to our own
5694 internal signal numbering from target.h, not to host/target
5695 signal number. This is a feature; users really should be
5696 using symbolic names anyway, and the common ones like
5697 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5698
5699 sigfirst = siglast = (int)
5700 target_signal_from_command (atoi (*argv));
5701 if ((*argv)[digits] == '-')
5702 {
5703 siglast = (int)
5704 target_signal_from_command (atoi ((*argv) + digits + 1));
5705 }
5706 if (sigfirst > siglast)
5707 {
5708 /* Bet he didn't figure we'd think of this case... */
5709 signum = sigfirst;
5710 sigfirst = siglast;
5711 siglast = signum;
5712 }
5713 }
5714 else
5715 {
5716 oursig = target_signal_from_name (*argv);
5717 if (oursig != TARGET_SIGNAL_UNKNOWN)
5718 {
5719 sigfirst = siglast = (int) oursig;
5720 }
5721 else
5722 {
5723 /* Not a number and not a recognized flag word => complain. */
5724 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5725 }
5726 }
5727
5728 /* If any signal numbers or symbol names were found, set flags for
5729 which signals to apply actions to. */
5730
5731 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5732 {
5733 switch ((enum target_signal) signum)
5734 {
5735 case TARGET_SIGNAL_TRAP:
5736 case TARGET_SIGNAL_INT:
5737 if (!allsigs && !sigs[signum])
5738 {
5739 if (query (_("%s is used by the debugger.\n\
5740 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5741 {
5742 sigs[signum] = 1;
5743 }
5744 else
5745 {
5746 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5747 gdb_flush (gdb_stdout);
5748 }
5749 }
5750 break;
5751 case TARGET_SIGNAL_0:
5752 case TARGET_SIGNAL_DEFAULT:
5753 case TARGET_SIGNAL_UNKNOWN:
5754 /* Make sure that "all" doesn't print these. */
5755 break;
5756 default:
5757 sigs[signum] = 1;
5758 break;
5759 }
5760 }
5761
5762 argv++;
5763 }
5764
5765 for (signum = 0; signum < nsigs; signum++)
5766 if (sigs[signum])
5767 {
5768 target_notice_signals (inferior_ptid);
5769
5770 if (from_tty)
5771 {
5772 /* Show the results. */
5773 sig_print_header ();
5774 for (; signum < nsigs; signum++)
5775 if (sigs[signum])
5776 sig_print_info (signum);
5777 }
5778
5779 break;
5780 }
5781
5782 do_cleanups (old_chain);
5783 }
5784
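/* XDB-compatible front end for the "handle" command: ARGV[0] names a
   signal and ARGV[1] is one of the flags "s", "i", "r" or "Q".  The
   flag toggles the corresponding stop/pass/print setting ("Q" forces
   noprint) by building an equivalent "handle" command string and
   passing it to handle_command.  */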
5785 static void
5786 xdb_handle_command (char *args, int from_tty)
5787 {
5788 char **argv;
5789 struct cleanup *old_chain;
5790
5791 if (args == NULL)
5792 error_no_arg (_("xdb command"));
5793
5794 /* Break the command line up into args. */
5795
5796 argv = gdb_buildargv (args);
5797 old_chain = make_cleanup_freeargv (argv);
5798 if (argv[1] != (char *) NULL)
5799 {
5800 char *argBuf;
5801 int bufLen;
5802
5803 bufLen = strlen (argv[0]) + 20;
5804 argBuf = (char *) xmalloc (bufLen);
5805 if (argBuf)
5806 {
5807 int validFlag = 1;
5808 enum target_signal oursig;
5809
5810 oursig = target_signal_from_name (argv[0]);
5811 memset (argBuf, 0, bufLen);
5812 if (strcmp (argv[1], "Q") == 0)
5813 sprintf (argBuf, "%s %s", argv[0], "noprint");
5814 else
5815 {
5816 if (strcmp (argv[1], "s") == 0)
5817 {
5818 if (!signal_stop[oursig])
5819 sprintf (argBuf, "%s %s", argv[0], "stop");
5820 else
5821 sprintf (argBuf, "%s %s", argv[0], "nostop");
5822 }
5823 else if (strcmp (argv[1], "i") == 0)
5824 {
5825 if (!signal_program[oursig])
5826 sprintf (argBuf, "%s %s", argv[0], "pass");
5827 else
5828 sprintf (argBuf, "%s %s", argv[0], "nopass");
5829 }
5830 else if (strcmp (argv[1], "r") == 0)
5831 {
5832 if (!signal_print[oursig])
5833 sprintf (argBuf, "%s %s", argv[0], "print");
5834 else
5835 sprintf (argBuf, "%s %s", argv[0], "noprint");
5836 }
5837 else
5838 validFlag = 0;
5839 }
5840 if (validFlag)
5841 handle_command (argBuf, from_tty);
5842 else
5843 printf_filtered (_("Invalid signal handling flag.\n"));
5844 if (argBuf)
5845 xfree (argBuf);
5846 }
5847 }
5848 do_cleanups (old_chain);
5849 }
5850
5851 /* Print current contents of the tables set by the handle command.
5852 It is possible we should just be printing signals actually used
5853 by the current target (but for things to work right when switching
5854 targets, all signals should be in the signal tables). */
5855
5856 static void
5857 signals_info (char *signum_exp, int from_tty)
5858 {
5859 enum target_signal oursig;
5860
5861 sig_print_header ();
5862
5863 if (signum_exp)
5864 {
5865 /* First see if this is a symbol name. */
5866 oursig = target_signal_from_name (signum_exp);
5867 if (oursig == TARGET_SIGNAL_UNKNOWN)
5868 {
5869 /* No, try numeric. */
5870 oursig =
5871 target_signal_from_command (parse_and_eval_long (signum_exp));
5872 }
5873 sig_print_info (oursig);
5874 return;
5875 }
5876
5877 printf_filtered ("\n");
5878 /* These ugly casts brought to you by the native VAX compiler. */
5879 for (oursig = TARGET_SIGNAL_FIRST;
5880 (int) oursig < (int) TARGET_SIGNAL_LAST;
5881 oursig = (enum target_signal) ((int) oursig + 1))
5882 {
5883 QUIT;
5884
5885 if (oursig != TARGET_SIGNAL_UNKNOWN
5886 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5887 sig_print_info (oursig);
5888 }
5889
5890 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5891 }
5892
5893 /* The $_siginfo convenience variable is a bit special. We don't know
5894 for sure the type of the value until we actually have a chance to
5895 fetch the data. The type can change depending on gdbarch, so it is
5896 also dependent on which thread you have selected. We handle this by:
5897
5898 1. making $_siginfo be an internalvar that creates a new value on
5899 access.
5900
5901 2. making the value of $_siginfo be an lval_computed value. */
5902
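/* In practice, evaluating "print $_siginfo" creates the value via
   siginfo_make_value and fetches the bytes lazily through
   siginfo_value_read; assigning to $_siginfo writes them back through
   siginfo_value_write.  */
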
5903 /* This function implements the lval_computed support for reading a
5904 $_siginfo value. */
5905
5906 static void
5907 siginfo_value_read (struct value *v)
5908 {
5909 LONGEST transferred;
5910
5911 transferred =
5912 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5913 NULL,
5914 value_contents_all_raw (v),
5915 value_offset (v),
5916 TYPE_LENGTH (value_type (v)));
5917
5918 if (transferred != TYPE_LENGTH (value_type (v)))
5919 error (_("Unable to read siginfo"));
5920 }
5921
5922 /* This function implements the lval_computed support for writing a
5923 $_siginfo value. */
5924
5925 static void
5926 siginfo_value_write (struct value *v, struct value *fromval)
5927 {
5928 LONGEST transferred;
5929
5930 transferred = target_write (&current_target,
5931 TARGET_OBJECT_SIGNAL_INFO,
5932 NULL,
5933 value_contents_all_raw (fromval),
5934 value_offset (v),
5935 TYPE_LENGTH (value_type (fromval)));
5936
5937 if (transferred != TYPE_LENGTH (value_type (fromval)))
5938 error (_("Unable to write siginfo"));
5939 }
5940
5941 static struct lval_funcs siginfo_value_funcs =
5942 {
5943 siginfo_value_read,
5944 siginfo_value_write
5945 };
5946
5947 /* Return a new value with the correct type for the siginfo object of
5948 the current thread using architecture GDBARCH. Return a void value
5949 if there's no object available. */
5950
5951 static struct value *
5952 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5953 {
5954 if (target_has_stack
5955 && !ptid_equal (inferior_ptid, null_ptid)
5956 && gdbarch_get_siginfo_type_p (gdbarch))
5957 {
5958 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5959
5960 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5961 }
5962
5963 return allocate_value (builtin_type (gdbarch)->builtin_void);
5964 }
5965
5966 \f
5967 /* Inferior thread state.
5968 These are details related to the inferior itself, and don't include
5969 things like what frame the user had selected or what gdb was doing
5970 with the target at the time.
5971 For inferior function calls these are things we want to restore
5972 regardless of whether the function call successfully completes
5973 or the dummy frame has to be manually popped. */
5974
5975 struct inferior_thread_state
5976 {
5977 enum target_signal stop_signal;
5978 CORE_ADDR stop_pc;
5979 struct regcache *registers;
5980 };
5981
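/* Save the current thread's stop signal, the global stop_pc and a copy
   of the register cache, so that an inferior function call can restore
   them afterwards.  */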
5982 struct inferior_thread_state *
5983 save_inferior_thread_state (void)
5984 {
5985 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5986 struct thread_info *tp = inferior_thread ();
5987
5988 inf_state->stop_signal = tp->stop_signal;
5989 inf_state->stop_pc = stop_pc;
5990
5991 inf_state->registers = regcache_dup (get_current_regcache ());
5992
5993 return inf_state;
5994 }
5995
5996 /* Restore inferior session state to INF_STATE. */
5997
5998 void
5999 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6000 {
6001 struct thread_info *tp = inferior_thread ();
6002
6003 tp->stop_signal = inf_state->stop_signal;
6004 stop_pc = inf_state->stop_pc;
6005
6006 /* The inferior can be gone if the user types "print exit(0)"
6007 (and perhaps other times). */
6008 if (target_has_execution)
6009 /* NB: The register write goes through to the target. */
6010 regcache_cpy (get_current_regcache (), inf_state->registers);
6011 regcache_xfree (inf_state->registers);
6012 xfree (inf_state);
6013 }
6014
6015 static void
6016 do_restore_inferior_thread_state_cleanup (void *state)
6017 {
6018 restore_inferior_thread_state (state);
6019 }
6020
6021 struct cleanup *
6022 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6023 {
6024 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6025 }
6026
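/* Discard a saved thread state without restoring it, freeing the
   register copy it holds.  */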
6027 void
6028 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6029 {
6030 regcache_xfree (inf_state->registers);
6031 xfree (inf_state);
6032 }
6033
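/* Return the regcache held in a saved thread state.  */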
6034 struct regcache *
6035 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6036 {
6037 return inf_state->registers;
6038 }
6039
6040 /* Session related state for inferior function calls.
6041 These are the additional bits of state that need to be restored
6042 when an inferior function call successfully completes. */
6043
6044 struct inferior_status
6045 {
6046 bpstat stop_bpstat;
6047 int stop_step;
6048 enum stop_stack_kind stop_stack_dummy;
6049 int stopped_by_random_signal;
6050 int stepping_over_breakpoint;
6051 CORE_ADDR step_range_start;
6052 CORE_ADDR step_range_end;
6053 struct frame_id step_frame_id;
6054 struct frame_id step_stack_frame_id;
6055 enum step_over_calls_kind step_over_calls;
6056 CORE_ADDR step_resume_break_address;
6057 int stop_after_trap;
6058 int stop_soon;
6059
6060 /* ID of the selected frame when the inferior function call was made. */
6061 struct frame_id selected_frame_id;
6062
6063 int proceed_to_finish;
6064 int in_infcall;
6065 };
6066
6067 /* Save all of the information associated with the inferior<==>gdb
6068 connection. */
6069
6070 struct inferior_status *
6071 save_inferior_status (void)
6072 {
6073 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6074 struct thread_info *tp = inferior_thread ();
6075 struct inferior *inf = current_inferior ();
6076
6077 inf_status->stop_step = tp->stop_step;
6078 inf_status->stop_stack_dummy = stop_stack_dummy;
6079 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6080 inf_status->stepping_over_breakpoint = tp->trap_expected;
6081 inf_status->step_range_start = tp->step_range_start;
6082 inf_status->step_range_end = tp->step_range_end;
6083 inf_status->step_frame_id = tp->step_frame_id;
6084 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6085 inf_status->step_over_calls = tp->step_over_calls;
6086 inf_status->stop_after_trap = stop_after_trap;
6087 inf_status->stop_soon = inf->stop_soon;
6088 /* Save original bpstat chain here; replace it with copy of chain.
6089 If caller's caller is walking the chain, they'll be happier if we
6090 hand them back the original chain when restore_inferior_status is
6091 called. */
6092 inf_status->stop_bpstat = tp->stop_bpstat;
6093 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6094 inf_status->proceed_to_finish = tp->proceed_to_finish;
6095 inf_status->in_infcall = tp->in_infcall;
6096
6097 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6098
6099 return inf_status;
6100 }
6101
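/* Callback for catch_errors: select the frame identified by *ARGS (a
   struct frame_id).  Warns and returns 0 if that frame can no longer
   be found.  */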
6102 static int
6103 restore_selected_frame (void *args)
6104 {
6105 struct frame_id *fid = (struct frame_id *) args;
6106 struct frame_info *frame;
6107
6108 frame = frame_find_by_id (*fid);
6109
6110 /* If the frame with that ID can no longer be found, warn and
6111 let the caller select a frame instead. */
6112 if (frame == NULL)
6113 {
6114 warning (_("Unable to restore previously selected frame."));
6115 return 0;
6116 }
6117
6118 select_frame (frame);
6119
6120 return (1);
6121 }
6122
6123 /* Restore inferior session state to INF_STATUS. */
6124
6125 void
6126 restore_inferior_status (struct inferior_status *inf_status)
6127 {
6128 struct thread_info *tp = inferior_thread ();
6129 struct inferior *inf = current_inferior ();
6130
6131 tp->stop_step = inf_status->stop_step;
6132 stop_stack_dummy = inf_status->stop_stack_dummy;
6133 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6134 tp->trap_expected = inf_status->stepping_over_breakpoint;
6135 tp->step_range_start = inf_status->step_range_start;
6136 tp->step_range_end = inf_status->step_range_end;
6137 tp->step_frame_id = inf_status->step_frame_id;
6138 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6139 tp->step_over_calls = inf_status->step_over_calls;
6140 stop_after_trap = inf_status->stop_after_trap;
6141 inf->stop_soon = inf_status->stop_soon;
6142 bpstat_clear (&tp->stop_bpstat);
6143 tp->stop_bpstat = inf_status->stop_bpstat;
6144 inf_status->stop_bpstat = NULL;
6145 tp->proceed_to_finish = inf_status->proceed_to_finish;
6146 tp->in_infcall = inf_status->in_infcall;
6147
6148 if (target_has_stack)
6149 {
6150 /* The point of catch_errors is that if the stack is clobbered,
6151 walking the stack might encounter a garbage pointer and
6152 error() trying to dereference it. */
6153 if (catch_errors
6154 (restore_selected_frame, &inf_status->selected_frame_id,
6155 "Unable to restore previously selected frame:\n",
6156 RETURN_MASK_ERROR) == 0)
6157 /* Error in restoring the selected frame. Select the innermost
6158 frame. */
6159 select_frame (get_current_frame ());
6160 }
6161
6162 xfree (inf_status);
6163 }
6164
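/* Cleanup wrapper so that restore_inferior_status can be registered
   with make_cleanup.  */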
6165 static void
6166 do_restore_inferior_status_cleanup (void *sts)
6167 {
6168 restore_inferior_status (sts);
6169 }
6170
6171 struct cleanup *
6172 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6173 {
6174 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6175 }
6176
6177 void
6178 discard_inferior_status (struct inferior_status *inf_status)
6179 {
6180 /* See save_inferior_status for info on stop_bpstat. */
6181 bpstat_clear (&inf_status->stop_bpstat);
6182 xfree (inf_status);
6183 }
6184 \f
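/* The inferior_has_* predicates below check whether the last target
   event reported for PID was of the corresponding kind and, if so,
   return the event's data (child ptid, exec pathname or syscall
   number) through the second argument.  */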
6185 int
6186 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6187 {
6188 struct target_waitstatus last;
6189 ptid_t last_ptid;
6190
6191 get_last_target_status (&last_ptid, &last);
6192
6193 if (last.kind != TARGET_WAITKIND_FORKED)
6194 return 0;
6195
6196 if (!ptid_equal (last_ptid, pid))
6197 return 0;
6198
6199 *child_pid = last.value.related_pid;
6200 return 1;
6201 }
6202
6203 int
6204 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6205 {
6206 struct target_waitstatus last;
6207 ptid_t last_ptid;
6208
6209 get_last_target_status (&last_ptid, &last);
6210
6211 if (last.kind != TARGET_WAITKIND_VFORKED)
6212 return 0;
6213
6214 if (!ptid_equal (last_ptid, pid))
6215 return 0;
6216
6217 *child_pid = last.value.related_pid;
6218 return 1;
6219 }
6220
6221 int
6222 inferior_has_execd (ptid_t pid, char **execd_pathname)
6223 {
6224 struct target_waitstatus last;
6225 ptid_t last_ptid;
6226
6227 get_last_target_status (&last_ptid, &last);
6228
6229 if (last.kind != TARGET_WAITKIND_EXECD)
6230 return 0;
6231
6232 if (!ptid_equal (last_ptid, pid))
6233 return 0;
6234
6235 *execd_pathname = xstrdup (last.value.execd_pathname);
6236 return 1;
6237 }
6238
6239 int
6240 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6241 {
6242 struct target_waitstatus last;
6243 ptid_t last_ptid;
6244
6245 get_last_target_status (&last_ptid, &last);
6246
6247 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6248 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6249 return 0;
6250
6251 if (!ptid_equal (last_ptid, pid))
6252 return 0;
6253
6254 *syscall_number = last.value.syscall_number;
6255 return 1;
6256 }
6257
6258 /* Oft used ptids */
6259 ptid_t null_ptid;
6260 ptid_t minus_one_ptid;
6261
6262 /* Create a ptid given the necessary PID, LWP, and TID components. */
6263
6264 ptid_t
6265 ptid_build (int pid, long lwp, long tid)
6266 {
6267 ptid_t ptid;
6268
6269 ptid.pid = pid;
6270 ptid.lwp = lwp;
6271 ptid.tid = tid;
6272 return ptid;
6273 }
6274
6275 /* Create a ptid from just a pid. */
6276
6277 ptid_t
6278 pid_to_ptid (int pid)
6279 {
6280 return ptid_build (pid, 0, 0);
6281 }
6282
6283 /* Fetch the pid (process id) component from a ptid. */
6284
6285 int
6286 ptid_get_pid (ptid_t ptid)
6287 {
6288 return ptid.pid;
6289 }
6290
6291 /* Fetch the lwp (lightweight process) component from a ptid. */
6292
6293 long
6294 ptid_get_lwp (ptid_t ptid)
6295 {
6296 return ptid.lwp;
6297 }
6298
6299 /* Fetch the tid (thread id) component from a ptid. */
6300
6301 long
6302 ptid_get_tid (ptid_t ptid)
6303 {
6304 return ptid.tid;
6305 }
6306
6307 /* ptid_equal() is used to test equality of two ptids. */
6308
6309 int
6310 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6311 {
6312 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6313 && ptid1.tid == ptid2.tid);
6314 }
6315
6316 /* Returns true if PTID represents a process. */
6317
6318 int
6319 ptid_is_pid (ptid_t ptid)
6320 {
6321 if (ptid_equal (minus_one_ptid, ptid))
6322 return 0;
6323 if (ptid_equal (null_ptid, ptid))
6324 return 0;
6325
6326 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6327 }
6328
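/* Return true if PTID matches FILTER.  FILTER can be the wildcard
   minus_one_ptid (matching everything), a ptid representing a whole
   process (matching every thread of that process), or an exact ptid.  */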
6329 int
6330 ptid_match (ptid_t ptid, ptid_t filter)
6331 {
6332 /* Since both parameters have the same type, prevent easy mistakes
6333 from happening. */
6334 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6335 && !ptid_equal (ptid, null_ptid));
6336
6337 if (ptid_equal (filter, minus_one_ptid))
6338 return 1;
6339 if (ptid_is_pid (filter)
6340 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6341 return 1;
6342 else if (ptid_equal (ptid, filter))
6343 return 1;
6344
6345 return 0;
6346 }
6347
6348 /* restore_inferior_ptid() will be used by the cleanup machinery
6349 to restore the inferior_ptid value saved in a call to
6350 save_inferior_ptid(). */
6351
6352 static void
6353 restore_inferior_ptid (void *arg)
6354 {
6355 ptid_t *saved_ptid_ptr = arg;
6356
6357 inferior_ptid = *saved_ptid_ptr;
6358 xfree (arg);
6359 }
6360
6361 /* Save the value of inferior_ptid so that it may be restored by a
6362 later call to do_cleanups(). Returns the struct cleanup pointer
6363 needed for later doing the cleanup. */
6364
6365 struct cleanup *
6366 save_inferior_ptid (void)
6367 {
6368 ptid_t *saved_ptid_ptr;
6369
6370 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6371 *saved_ptid_ptr = inferior_ptid;
6372 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6373 }
6374 \f
6375
6376 /* User interface for reverse debugging:
6377 Set exec-direction / show exec-direction commands
6378 (returns an error unless the target implements the to_set_exec_direction method). */
6379
6380 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6381 static const char exec_forward[] = "forward";
6382 static const char exec_reverse[] = "reverse";
6383 static const char *exec_direction = exec_forward;
6384 static const char *exec_direction_names[] = {
6385 exec_forward,
6386 exec_reverse,
6387 NULL
6388 };
6389
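/* Handler for "set exec-direction": update execution_direction from
   the user-visible exec_direction string, provided the target is able
   to execute in reverse.  */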
6390 static void
6391 set_exec_direction_func (char *args, int from_tty,
6392 struct cmd_list_element *cmd)
6393 {
6394 if (target_can_execute_reverse)
6395 {
6396 if (!strcmp (exec_direction, exec_forward))
6397 execution_direction = EXEC_FORWARD;
6398 else if (!strcmp (exec_direction, exec_reverse))
6399 execution_direction = EXEC_REVERSE;
6400 }
6401 }
6402
6403 static void
6404 show_exec_direction_func (struct ui_file *out, int from_tty,
6405 struct cmd_list_element *cmd, const char *value)
6406 {
6407 switch (execution_direction) {
6408 case EXEC_FORWARD:
6409 fprintf_filtered (out, _("Forward.\n"));
6410 break;
6411 case EXEC_REVERSE:
6412 fprintf_filtered (out, _("Reverse.\n"));
6413 break;
6414 case EXEC_ERROR:
6415 default:
6416 fprintf_filtered (out,
6417 _("Forward (target `%s' does not support exec-direction).\n"),
6418 target_shortname);
6419 break;
6420 }
6421 }
6422
6423 /* User interface for non-stop mode. */
6424
6425 int non_stop = 0;
6426 static int non_stop_1 = 0;
6427
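/* Handler for "set non-stop": refuse to change the setting while the
   inferior is executing (reverting the staged non_stop_1 value),
   otherwise commit the staged value to non_stop.  */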
6428 static void
6429 set_non_stop (char *args, int from_tty,
6430 struct cmd_list_element *c)
6431 {
6432 if (target_has_execution)
6433 {
6434 non_stop_1 = non_stop;
6435 error (_("Cannot change this setting while the inferior is running."));
6436 }
6437
6438 non_stop = non_stop_1;
6439 }
6440
6441 static void
6442 show_non_stop (struct ui_file *file, int from_tty,
6443 struct cmd_list_element *c, const char *value)
6444 {
6445 fprintf_filtered (file,
6446 _("Controlling the inferior in non-stop mode is %s.\n"),
6447 value);
6448 }
6449
6450 static void
6451 show_schedule_multiple (struct ui_file *file, int from_tty,
6452 struct cmd_list_element *c, const char *value)
6453 {
6454 fprintf_filtered (file, _("\
6455 Resuming the execution of threads of all processes is %s.\n"), value);
6456 }
6457
6458 void
6459 _initialize_infrun (void)
6460 {
6461 int i;
6462 int numsigs;
6463
6464 add_info ("signals", signals_info, _("\
6465 What debugger does when program gets various signals.\n\
6466 Specify a signal as argument to print info on that signal only."));
6467 add_info_alias ("handle", "signals", 0);
6468
6469 add_com ("handle", class_run, handle_command, _("\
6470 Specify how to handle a signal.\n\
6471 Args are signals and actions to apply to those signals.\n\
6472 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6473 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6474 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6475 The special arg \"all\" is recognized to mean all signals except those\n\
6476 used by the debugger, typically SIGTRAP and SIGINT.\n\
6477 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6478 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6479 Stop means reenter debugger if this signal happens (implies print).\n\
6480 Print means print a message if this signal happens.\n\
6481 Pass means let program see this signal; otherwise program doesn't know.\n\
6482 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6483 Pass and Stop may be combined."));
6484 if (xdb_commands)
6485 {
6486 add_com ("lz", class_info, signals_info, _("\
6487 What debugger does when program gets various signals.\n\
6488 Specify a signal as argument to print info on that signal only."));
6489 add_com ("z", class_run, xdb_handle_command, _("\
6490 Specify how to handle a signal.\n\
6491 Args are signals and actions to apply to those signals.\n\
6492 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6493 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6494 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6495 The special arg \"all\" is recognized to mean all signals except those\n\
6496 used by the debugger, typically SIGTRAP and SIGINT.\n\
6497 Recognized actions include \"s\" (toggles between stop and nostop), \n\
6498 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6499 nopass), \"Q\" (noprint)\n\
6500 Stop means reenter debugger if this signal happens (implies print).\n\
6501 Print means print a message if this signal happens.\n\
6502 Pass means let program see this signal; otherwise program doesn't know.\n\
6503 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6504 Pass and Stop may be combined."));
6505 }
6506
6507 if (!dbx_commands)
6508 stop_command = add_cmd ("stop", class_obscure,
6509 not_just_help_class_command, _("\
6510 There is no `stop' command, but you can set a hook on `stop'.\n\
6511 This allows you to set a list of commands to be run each time execution\n\
6512 of the program stops."), &cmdlist);
6513
6514 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6515 Set inferior debugging."), _("\
6516 Show inferior debugging."), _("\
6517 When non-zero, inferior specific debugging is enabled."),
6518 NULL,
6519 show_debug_infrun,
6520 &setdebuglist, &showdebuglist);
6521
6522 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6523 Set displaced stepping debugging."), _("\
6524 Show displaced stepping debugging."), _("\
6525 When non-zero, displaced stepping specific debugging is enabled."),
6526 NULL,
6527 show_debug_displaced,
6528 &setdebuglist, &showdebuglist);
6529
6530 add_setshow_boolean_cmd ("non-stop", no_class,
6531 &non_stop_1, _("\
6532 Set whether gdb controls the inferior in non-stop mode."), _("\
6533 Show whether gdb controls the inferior in non-stop mode."), _("\
6534 When debugging a multi-threaded program and this setting is\n\
6535 off (the default, also called all-stop mode), when one thread stops\n\
6536 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6537 all other threads in the program while you interact with the thread of\n\
6538 interest. When you continue or step a thread, you can allow the other\n\
6539 threads to run, or have them remain stopped, but while you inspect any\n\
6540 thread's state, all threads stop.\n\
6541 \n\
6542 In non-stop mode, when one thread stops, other threads can continue\n\
6543 to run freely. You'll be able to step each thread independently,\n\
6544 leave it stopped or free to run as needed."),
6545 set_non_stop,
6546 show_non_stop,
6547 &setlist,
6548 &showlist);
6549
6550 numsigs = (int) TARGET_SIGNAL_LAST;
6551 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6552 signal_print = (unsigned char *)
6553 xmalloc (sizeof (signal_print[0]) * numsigs);
6554 signal_program = (unsigned char *)
6555 xmalloc (sizeof (signal_program[0]) * numsigs);
6556 for (i = 0; i < numsigs; i++)
6557 {
6558 signal_stop[i] = 1;
6559 signal_print[i] = 1;
6560 signal_program[i] = 1;
6561 }
6562
6563 /* Signals caused by debugger's own actions
6564 should not be given to the program afterwards. */
6565 signal_program[TARGET_SIGNAL_TRAP] = 0;
6566 signal_program[TARGET_SIGNAL_INT] = 0;
6567
6568 /* Signals that are not errors should not normally enter the debugger. */
6569 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6570 signal_print[TARGET_SIGNAL_ALRM] = 0;
6571 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6572 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6573 signal_stop[TARGET_SIGNAL_PROF] = 0;
6574 signal_print[TARGET_SIGNAL_PROF] = 0;
6575 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6576 signal_print[TARGET_SIGNAL_CHLD] = 0;
6577 signal_stop[TARGET_SIGNAL_IO] = 0;
6578 signal_print[TARGET_SIGNAL_IO] = 0;
6579 signal_stop[TARGET_SIGNAL_POLL] = 0;
6580 signal_print[TARGET_SIGNAL_POLL] = 0;
6581 signal_stop[TARGET_SIGNAL_URG] = 0;
6582 signal_print[TARGET_SIGNAL_URG] = 0;
6583 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6584 signal_print[TARGET_SIGNAL_WINCH] = 0;
6585
6586 /* These signals are used internally by user-level thread
6587 implementations. (See signal(5) on Solaris.) Like the above
6588 signals, a healthy program receives and handles them as part of
6589 its normal operation. */
6590 signal_stop[TARGET_SIGNAL_LWP] = 0;
6591 signal_print[TARGET_SIGNAL_LWP] = 0;
6592 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6593 signal_print[TARGET_SIGNAL_WAITING] = 0;
6594 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6595 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6596
6597 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6598 &stop_on_solib_events, _("\
6599 Set stopping for shared library events."), _("\
6600 Show stopping for shared library events."), _("\
6601 If nonzero, gdb will give control to the user when the dynamic linker\n\
6602 notifies gdb of shared library events. The most common event of interest\n\
6603 to the user would be loading/unloading of a new library."),
6604 NULL,
6605 show_stop_on_solib_events,
6606 &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

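  /* scheduler-locking uses set_schedlock_func as its set hook so that
     an attempt to enable locking can be rejected when the current
     target is unable to lock the scheduler (hence the "traps on target
     vector" note below).  */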
  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
        In this mode, no other thread may run during a step command.\n\
        Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);

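  /* exec-direction is only meaningful on targets that can execute in
     reverse; the set hook (set_exec_direction_func) is expected to
     reject 'reverse' on targets without that support.  */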
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

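  /* Keep infrun's cached state (such as the ptids recorded above) in
     sync with thread and inferior lifecycle events.  */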
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
}