gdb/infrun.c (binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111 over such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
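/* Usage sketch (not part of the original source): step_stop_if_no_debug
   above is controlled by the "step-mode" setting.

       (gdb) set step-mode on
       (gdb) show step-mode
       Mode of the step operation is on.

   With it on, "step" stops at the first instruction of a function that
   has no line number information instead of stepping over it.  */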
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149 /* If the program uses ELF-style shared libraries, then calls to
150 functions in shared libraries go through stubs, which live in a
151 table called the PLT (Procedure Linkage Table). The first time the
152 function is called, the stub sends control to the dynamic linker,
153 which looks up the function's real address, patches the stub so
154 that future calls will go directly to the function, and then passes
155 control to the function.
156
157 If we are stepping at the source level, we don't want to see any of
158 this --- we just want to skip over the stub and the dynamic linker.
159 The simple approach is to single-step until control leaves the
160 dynamic linker.
161
162 However, on some systems (e.g., Red Hat's 5.2 distribution) the
163 dynamic linker calls functions in the shared C library, so you
164 can't tell from the PC alone whether the dynamic linker is still
165 running. In this case, we use a step-resume breakpoint to get us
166 past the dynamic linker, as if we were using "next" to step over a
167 function call.
168
169 in_solib_dynsym_resolve_code() says whether we're in the dynamic
170 linker code or not. Normally, this means we single-step. However,
171 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
172 address where we can place a step-resume breakpoint to get past the
173 linker's symbol resolution function.
174
175 in_solib_dynsym_resolve_code() can generally be implemented in a
176 pretty portable way, by comparing the PC against the address ranges
177 of the dynamic linker's sections.
178
179 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
180 it depends on internal details of the dynamic linker. It's usually
181 not too hard to figure out where to put a breakpoint, but it
182 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
183 sanity checking. If it can't figure things out, returning zero and
184 getting the (possibly confusing) stepping behavior is better than
185 signalling an error, which will obscure the change in the
186 inferior's state. */
187
188 /* This function returns TRUE if pc is the address of an instruction
189 that lies within the dynamic linker (such as the event hook, or the
190 dld itself).
191
192 This function must be used only when a dynamic linker event has
193 been caught, and the inferior is being stepped out of the hook, or
194 undefined results are guaranteed. */
195
196 #ifndef SOLIB_IN_DYNAMIC_LINKER
197 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
198 #endif
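/* Illustrative sketch (not part of the original source): how the two
   hooks described above are typically combined by stepping logic.
   in_solib_dynsym_resolve_code and gdbarch_skip_solib_resolver are the
   real interfaces; sketch_step_past_solib_resolver and
   plant_step_resume_breakpoint_at are hypothetical names used purely
   for exposition.  */
#if 0
static void
sketch_step_past_solib_resolver (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  if (in_solib_dynsym_resolve_code (pc))
    {
      CORE_ADDR resolver_return = gdbarch_skip_solib_resolver (gdbarch, pc);

      if (resolver_return != 0)
        /* The architecture knows where the resolver hands control back;
           run to that address instead of single-stepping through it.  */
        plant_step_resume_breakpoint_at (resolver_return); /* hypothetical */
      else
        {
          /* Keep single-stepping until the PC leaves the dynamic
             linker's code.  */
        }
    }
}
#endif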
199
200 /* "Observer mode" is somewhat like a more extreme version of
201 non-stop, in which all GDB operations that might affect the
202 target's execution have been disabled. */
203
204 static int non_stop_1 = 0;
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 extern int pagination_enabled;
214
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 target_async_permitted = 1;
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
248 static void
249 show_observer_mode (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
253 }
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
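/* Usage sketch (not part of the original source):

       (gdb) set observer on
       (gdb) show observer
       Observer mode is on.

   Entering observer mode this way also forces target-async and non-stop
   and disables pagination, as set_observer_mode above shows.  Turning an
   individual permission such as may-insert-breakpoints back on later
   makes update_observer_mode report that observer mode is off again.  */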
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that the target may silently handle.
287 This is automatically determined from the flags above,
288 and simply cached here. */
289 static unsigned char *signal_pass;
290
291 #define SET_SIGS(nsigs,sigs,flags) \
292 do { \
293 int signum = (nsigs); \
294 while (signum-- > 0) \
295 if ((sigs)[signum]) \
296 (flags)[signum] = 1; \
297 } while (0)
298
299 #define UNSET_SIGS(nsigs,sigs,flags) \
300 do { \
301 int signum = (nsigs); \
302 while (signum-- > 0) \
303 if ((sigs)[signum]) \
304 (flags)[signum] = 0; \
305 } while (0)
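/* Usage sketch (not part of the original source): how "handle"-style
   code drives these macros.  SIGS is a scratch array with one entry per
   signal, non-zero for the signals the user named; the macros copy that
   selection into the policy tables above.  The sketch_* function name is
   invented for exposition.  */
#if 0
static void
sketch_handle_sigint_stop_print_nopass (void)
{
  int nsigs = (int) TARGET_SIGNAL_LAST;
  unsigned char *sigs = alloca (nsigs);

  memset (sigs, 0, nsigs);
  sigs[TARGET_SIGNAL_INT] = 1;  /* The user named SIGINT.  */

  SET_SIGS (nsigs, sigs, signal_stop);      /* "handle SIGINT stop"   */
  SET_SIGS (nsigs, sigs, signal_print);     /* "handle SIGINT print"  */
  UNSET_SIGS (nsigs, sigs, signal_program); /* "handle SIGINT nopass" */
}
#endif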
306
307 /* Value to pass to target_resume() to cause all threads to resume. */
308
309 #define RESUME_ALL minus_one_ptid
310
311 /* Command list pointer for the "stop" placeholder. */
312
313 static struct cmd_list_element *stop_command;
314
315 /* Function inferior was in as of last step command. */
316
317 static struct symbol *step_start_function;
318
319 /* Nonzero if we want to give control to the user when we're notified
320 of shared library events by the dynamic linker. */
321 int stop_on_solib_events;
322 static void
323 show_stop_on_solib_events (struct ui_file *file, int from_tty,
324 struct cmd_list_element *c, const char *value)
325 {
326 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
327 value);
328 }
329
330 /* Nonzero means expecting a trace trap
331 and should stop the inferior and return silently when it happens. */
332
333 int stop_after_trap;
334
335 /* Save register contents here when executing a "finish" command or when
336 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
337 Thus this contains the return value from the called function (assuming
338 values are returned in a register). */
339
340 struct regcache *stop_registers;
341
342 /* Nonzero after stop if current stack frame should be printed. */
343
344 static int stop_print_frame;
345
346 /* This is a cached copy of the pid/waitstatus of the last event
347 returned by target_wait()/deprecated_target_wait_hook(). This
348 information is returned by get_last_target_status(). */
349 static ptid_t target_last_wait_ptid;
350 static struct target_waitstatus target_last_waitstatus;
351
352 static void context_switch (ptid_t ptid);
353
354 void init_thread_stepping_state (struct thread_info *tss);
355
356 void init_infwait_state (void);
357
358 static const char follow_fork_mode_child[] = "child";
359 static const char follow_fork_mode_parent[] = "parent";
360
361 static const char *follow_fork_mode_kind_names[] = {
362 follow_fork_mode_child,
363 follow_fork_mode_parent,
364 NULL
365 };
366
367 static const char *follow_fork_mode_string = follow_fork_mode_parent;
368 static void
369 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
370 struct cmd_list_element *c, const char *value)
371 {
372 fprintf_filtered (file,
373 _("Debugger response to a program "
374 "call of fork or vfork is \"%s\".\n"),
375 value);
376 }
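/* Usage sketch (not part of the original source): follow_fork_mode_string
   and detach_fork above are driven by these commands.

       (gdb) set follow-fork-mode child
       (gdb) set detach-on-fork off
       (gdb) show follow-fork-mode
       Debugger response to a program call of fork or vfork is "child".

   With detach-on-fork off, the un-followed process stays under GDB's
   control as another inferior instead of being detached.  */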
377 \f
378
379 /* Tell the target to follow the fork we're stopped at. Returns true
380 if the inferior should be resumed; false, if the target for some
381 reason decided it's best not to resume. */
382
383 static int
384 follow_fork (void)
385 {
386 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
387 int should_resume = 1;
388 struct thread_info *tp;
389
390 /* Copy user stepping state to the new inferior thread. FIXME: the
391 followed fork child thread should have a copy of most of the
392 parent thread structure's run control related fields, not just these.
393 Initialized to avoid "may be used uninitialized" warnings from gcc. */
394 struct breakpoint *step_resume_breakpoint = NULL;
395 struct breakpoint *exception_resume_breakpoint = NULL;
396 CORE_ADDR step_range_start = 0;
397 CORE_ADDR step_range_end = 0;
398 struct frame_id step_frame_id = { 0 };
399
400 if (!non_stop)
401 {
402 ptid_t wait_ptid;
403 struct target_waitstatus wait_status;
404
405 /* Get the last target status returned by target_wait(). */
406 get_last_target_status (&wait_ptid, &wait_status);
407
408 /* If not stopped at a fork event, then there's nothing else to
409 do. */
410 if (wait_status.kind != TARGET_WAITKIND_FORKED
411 && wait_status.kind != TARGET_WAITKIND_VFORKED)
412 return 1;
413
414 /* Check if we switched over from WAIT_PTID, since the event was
415 reported. */
416 if (!ptid_equal (wait_ptid, minus_one_ptid)
417 && !ptid_equal (inferior_ptid, wait_ptid))
418 {
419 /* We did. Switch back to WAIT_PTID thread, to tell the
420 target to follow it (in either direction). We'll
421 afterwards refuse to resume, and inform the user what
422 happened. */
423 switch_to_thread (wait_ptid);
424 should_resume = 0;
425 }
426 }
427
428 tp = inferior_thread ();
429
430 /* If there were any forks/vforks that were caught and are now to be
431 followed, then do so now. */
432 switch (tp->pending_follow.kind)
433 {
434 case TARGET_WAITKIND_FORKED:
435 case TARGET_WAITKIND_VFORKED:
436 {
437 ptid_t parent, child;
438
439 /* If the user did a next/step, etc, over a fork call,
440 preserve the stepping state in the fork child. */
441 if (follow_child && should_resume)
442 {
443 step_resume_breakpoint = clone_momentary_breakpoint
444 (tp->control.step_resume_breakpoint);
445 step_range_start = tp->control.step_range_start;
446 step_range_end = tp->control.step_range_end;
447 step_frame_id = tp->control.step_frame_id;
448 exception_resume_breakpoint
449 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
450
451 /* For now, delete the parent's sr breakpoint, otherwise,
452 parent/child sr breakpoints are considered duplicates,
453 and the child version will not be installed. Remove
454 this when the breakpoints module becomes aware of
455 inferiors and address spaces. */
456 delete_step_resume_breakpoint (tp);
457 tp->control.step_range_start = 0;
458 tp->control.step_range_end = 0;
459 tp->control.step_frame_id = null_frame_id;
460 delete_exception_resume_breakpoint (tp);
461 }
462
463 parent = inferior_ptid;
464 child = tp->pending_follow.value.related_pid;
465
466 /* Tell the target to do whatever is necessary to follow
467 either parent or child. */
468 if (target_follow_fork (follow_child))
469 {
470 /* Target refused to follow, or there's some other reason
471 we shouldn't resume. */
472 should_resume = 0;
473 }
474 else
475 {
476 /* This pending follow fork event is now handled, one way
477 or another. The previously selected thread may be gone
478 from the lists by now, but if it is still around, we need
479 to clear the pending follow request. */
480 tp = find_thread_ptid (parent);
481 if (tp)
482 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
483
484 /* This makes sure we don't try to apply the "Switched
485 over from WAIT_PTID" logic above. */
486 nullify_last_target_wait_ptid ();
487
488 /* If we followed the child, switch to it... */
489 if (follow_child)
490 {
491 switch_to_thread (child);
492
493 /* ... and preserve the stepping state, in case the
494 user was stepping over the fork call. */
495 if (should_resume)
496 {
497 tp = inferior_thread ();
498 tp->control.step_resume_breakpoint
499 = step_resume_breakpoint;
500 tp->control.step_range_start = step_range_start;
501 tp->control.step_range_end = step_range_end;
502 tp->control.step_frame_id = step_frame_id;
503 tp->control.exception_resume_breakpoint
504 = exception_resume_breakpoint;
505 }
506 else
507 {
508 /* If we get here, it was because we're trying to
509 resume from a fork catchpoint, but the user
510 has switched threads away from the thread that
511 forked. In that case, the resume command
512 issued is most likely not applicable to the
513 child, so just warn, and refuse to resume. */
514 warning (_("Not resuming: switched threads "
515 "before following fork child."));
516 }
517
518 /* Reset breakpoints in the child as appropriate. */
519 follow_inferior_reset_breakpoints ();
520 }
521 else
522 switch_to_thread (parent);
523 }
524 }
525 break;
526 case TARGET_WAITKIND_SPURIOUS:
527 /* Nothing to follow. */
528 break;
529 default:
530 internal_error (__FILE__, __LINE__,
531 "Unexpected pending_follow.kind %d\n",
532 tp->pending_follow.kind);
533 break;
534 }
535
536 return should_resume;
537 }
538
539 void
540 follow_inferior_reset_breakpoints (void)
541 {
542 struct thread_info *tp = inferior_thread ();
543
544 /* Was there a step_resume breakpoint? (There was if the user
545 did a "next" at the fork() call.) If so, explicitly reset its
546 thread number.
547
548 step_resumes are a form of bp that are made to be per-thread.
549 Since we created the step_resume bp when the parent process
550 was being debugged, and now are switching to the child process,
551 from the breakpoint package's viewpoint, that's a switch of
552 "threads". We must update the bp's notion of which thread
553 it is for, or it'll be ignored when it triggers. */
554
555 if (tp->control.step_resume_breakpoint)
556 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
557
558 if (tp->control.exception_resume_breakpoint)
559 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
560
561 /* Reinsert all breakpoints in the child. The user may have set
562 breakpoints after catching the fork, in which case those
563 were never set in the child, but only in the parent. This makes
564 sure the inserted breakpoints match the breakpoint list. */
565
566 breakpoint_re_set ();
567 insert_breakpoints ();
568 }
569
570 /* The child has exited or execed: resume threads of the parent the
571 user wanted to be executing. */
572
573 static int
574 proceed_after_vfork_done (struct thread_info *thread,
575 void *arg)
576 {
577 int pid = * (int *) arg;
578
579 if (ptid_get_pid (thread->ptid) == pid
580 && is_running (thread->ptid)
581 && !is_executing (thread->ptid)
582 && !thread->stop_requested
583 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
584 {
585 if (debug_infrun)
586 fprintf_unfiltered (gdb_stdlog,
587 "infrun: resuming vfork parent thread %s\n",
588 target_pid_to_str (thread->ptid));
589
590 switch_to_thread (thread->ptid);
591 clear_proceed_status ();
592 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
593 }
594
595 return 0;
596 }
597
598 /* Called whenever we notice an exec or exit event, to handle
599 detaching or resuming a vfork parent. */
600
601 static void
602 handle_vfork_child_exec_or_exit (int exec)
603 {
604 struct inferior *inf = current_inferior ();
605
606 if (inf->vfork_parent)
607 {
608 int resume_parent = -1;
609
610 /* This exec or exit marks the end of the shared memory region
611 between the parent and the child. If the user wanted to
612 detach from the parent, now is the time. */
613
614 if (inf->vfork_parent->pending_detach)
615 {
616 struct thread_info *tp;
617 struct cleanup *old_chain;
618 struct program_space *pspace;
619 struct address_space *aspace;
620
621 /* follow-fork child, detach-on-fork on. */
622
623 old_chain = make_cleanup_restore_current_thread ();
624
625 /* We're letting go of the parent. */
626 tp = any_live_thread_of_process (inf->vfork_parent->pid);
627 switch_to_thread (tp->ptid);
628
629 /* We're about to detach from the parent, which implicitly
630 removes breakpoints from its address space. There's a
631 catch here: we want to reuse the spaces for the child,
632 but, parent/child are still sharing the pspace at this
633 point, although the exec in reality makes the kernel give
634 the child a fresh set of new pages. The problem here is
635 that the breakpoints module, being unaware of this, would
636 likely choose the child process to write to the parent
637 address space. Swapping the child temporarily away from
638 the spaces has the desired effect. Yes, this is "sort
639 of" a hack. */
640
641 pspace = inf->pspace;
642 aspace = inf->aspace;
643 inf->aspace = NULL;
644 inf->pspace = NULL;
645
646 if (debug_infrun || info_verbose)
647 {
648 target_terminal_ours ();
649
650 if (exec)
651 fprintf_filtered (gdb_stdlog,
652 "Detaching vfork parent process "
653 "%d after child exec.\n",
654 inf->vfork_parent->pid);
655 else
656 fprintf_filtered (gdb_stdlog,
657 "Detaching vfork parent process "
658 "%d after child exit.\n",
659 inf->vfork_parent->pid);
660 }
661
662 target_detach (NULL, 0);
663
664 /* Put it back. */
665 inf->pspace = pspace;
666 inf->aspace = aspace;
667
668 do_cleanups (old_chain);
669 }
670 else if (exec)
671 {
672 /* We're staying attached to the parent, so, really give the
673 child a new address space. */
674 inf->pspace = add_program_space (maybe_new_address_space ());
675 inf->aspace = inf->pspace->aspace;
676 inf->removable = 1;
677 set_current_program_space (inf->pspace);
678
679 resume_parent = inf->vfork_parent->pid;
680
681 /* Break the bonds. */
682 inf->vfork_parent->vfork_child = NULL;
683 }
684 else
685 {
686 struct cleanup *old_chain;
687 struct program_space *pspace;
688
689 /* If this is a vfork child exiting, then the pspace and
690 aspaces were shared with the parent. Since we're
691 reporting the process exit, we'll be mourning all that is
692 found in the address space, and switching to null_ptid,
693 preparing to start a new inferior. But, since we don't
694 want to clobber the parent's address/program spaces, we
695 go ahead and create a new one for this exiting
696 inferior. */
697
698 /* Switch to null_ptid, so that clone_program_space doesn't want
699 to read the selected frame of a dead process. */
700 old_chain = save_inferior_ptid ();
701 inferior_ptid = null_ptid;
702
703 /* This inferior is dead, so avoid giving the breakpoints
704 module the option to write through to it (cloning a
705 program space resets breakpoints). */
706 inf->aspace = NULL;
707 inf->pspace = NULL;
708 pspace = add_program_space (maybe_new_address_space ());
709 set_current_program_space (pspace);
710 inf->removable = 1;
711 clone_program_space (pspace, inf->vfork_parent->pspace);
712 inf->pspace = pspace;
713 inf->aspace = pspace->aspace;
714
715 /* Put back inferior_ptid. We'll continue mourning this
716 inferior. */
717 do_cleanups (old_chain);
718
719 resume_parent = inf->vfork_parent->pid;
720 /* Break the bonds. */
721 inf->vfork_parent->vfork_child = NULL;
722 }
723
724 inf->vfork_parent = NULL;
725
726 gdb_assert (current_program_space == inf->pspace);
727
728 if (non_stop && resume_parent != -1)
729 {
730 /* If the user wanted the parent to be running, let it go
731 free now. */
732 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
733
734 if (debug_infrun)
735 fprintf_unfiltered (gdb_stdlog,
736 "infrun: resuming vfork parent process %d\n",
737 resume_parent);
738
739 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
740
741 do_cleanups (old_chain);
742 }
743 }
744 }
745
746 /* Enum strings for "set|show follow-exec-mode". */
747
748 static const char follow_exec_mode_new[] = "new";
749 static const char follow_exec_mode_same[] = "same";
750 static const char *follow_exec_mode_names[] =
751 {
752 follow_exec_mode_new,
753 follow_exec_mode_same,
754 NULL,
755 };
756
757 static const char *follow_exec_mode_string = follow_exec_mode_same;
758 static void
759 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
760 struct cmd_list_element *c, const char *value)
761 {
762 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
763 }
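/* Usage sketch (not part of the original source):

       (gdb) set follow-exec-mode new
       (gdb) show follow-exec-mode
       Follow exec mode is "new".

   "new" keeps the pre-exec inferior and program space around and creates
   a fresh pair for the exec'd image (see follow_exec below); the default,
   "same", reuses the existing inferior.  */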
764
765 /* EXECD_PATHNAME is assumed to be non-NULL. */
766
767 static void
768 follow_exec (ptid_t pid, char *execd_pathname)
769 {
770 struct thread_info *th = inferior_thread ();
771 struct inferior *inf = current_inferior ();
772
773 /* This is an exec event that we actually wish to pay attention to.
774 Refresh our symbol table to the newly exec'd program, remove any
775 momentary bp's, etc.
776
777 If there are breakpoints, they aren't really inserted now,
778 since the exec() transformed our inferior into a fresh set
779 of instructions.
780
781 We want to preserve symbolic breakpoints on the list, since
782 we have hopes that they can be reset after the new a.out's
783 symbol table is read.
784
785 However, any "raw" breakpoints must be removed from the list
786 (e.g., the solib bp's), since their address is probably invalid
787 now.
788
789 And, we DON'T want to call delete_breakpoints() here, since
790 that may write the bp's "shadow contents" (the instruction
791 value that was overwritten with a TRAP instruction). Since
792 we now have a new a.out, those shadow contents aren't valid. */
793
794 mark_breakpoints_out ();
795
796 update_breakpoints_after_exec ();
797
798 /* If there was one, it's gone now. We cannot truly step-to-next
799 statement through an exec(). */
800 th->control.step_resume_breakpoint = NULL;
801 th->control.exception_resume_breakpoint = NULL;
802 th->control.step_range_start = 0;
803 th->control.step_range_end = 0;
804
805 /* The target reports the exec event to the main thread, even if
806 some other thread does the exec, and even if the main thread was
807 already stopped --- if debugging in non-stop mode, it's possible
808 the user had the main thread held stopped in the previous image
809 --- release it now. This is the same behavior as step-over-exec
810 with scheduler-locking on in all-stop mode. */
811 th->stop_requested = 0;
812
813 /* What is this a.out's name? */
814 printf_unfiltered (_("%s is executing new program: %s\n"),
815 target_pid_to_str (inferior_ptid),
816 execd_pathname);
817
818 /* We've followed the inferior through an exec. Therefore, the
819 inferior has essentially been killed & reborn. */
820
821 gdb_flush (gdb_stdout);
822
823 breakpoint_init_inferior (inf_execd);
824
825 if (gdb_sysroot && *gdb_sysroot)
826 {
827 char *name = alloca (strlen (gdb_sysroot)
828 + strlen (execd_pathname)
829 + 1);
830
831 strcpy (name, gdb_sysroot);
832 strcat (name, execd_pathname);
833 execd_pathname = name;
834 }
835
836 /* Reset the shared library package. This ensures that we get a
837 shlib event when the child reaches "_start", at which point the
838 dld will have had a chance to initialize the child. */
839 /* Also, loading a symbol file below may trigger symbol lookups, and
840 we don't want those to be satisfied by the libraries of the
841 previous incarnation of this process. */
842 no_shared_libraries (NULL, 0);
843
844 if (follow_exec_mode_string == follow_exec_mode_new)
845 {
846 struct program_space *pspace;
847
848 /* The user wants to keep the old inferior and program spaces
849 around. Create a fresh one, and switch to it. */
850
851 inf = add_inferior (current_inferior ()->pid);
852 pspace = add_program_space (maybe_new_address_space ());
853 inf->pspace = pspace;
854 inf->aspace = pspace->aspace;
855
856 exit_inferior_num_silent (current_inferior ()->num);
857
858 set_current_inferior (inf);
859 set_current_program_space (pspace);
860 }
861
862 gdb_assert (current_program_space == inf->pspace);
863
864 /* That a.out is now the one to use. */
865 exec_file_attach (execd_pathname, 0);
866
867 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement
868 for a PIE (Position Independent Executable) main symbol file will only
869 get applied by solib_create_inferior_hook below. breakpoint_re_set
870 would fail to insert the breakpoints with the zero displacement. */
871
872 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
873 NULL, 0);
874
875 set_initial_language ();
876
877 #ifdef SOLIB_CREATE_INFERIOR_HOOK
878 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
879 #else
880 solib_create_inferior_hook (0);
881 #endif
882
883 jit_inferior_created_hook ();
884
885 breakpoint_re_set ();
886
887 /* Reinsert all breakpoints. (Those which were symbolic have
888 been reset to the proper address in the new a.out, thanks
889 to symbol_file_command...). */
890 insert_breakpoints ();
891
892 /* The next resume of this inferior should bring it to the shlib
893 startup breakpoints. (If the user had also set bp's on
894 "main" from the old (parent) process, then they'll auto-
895 matically get reset there in the new process.). */
896 }
897
898 /* Non-zero if we are just simulating a single-step. This is needed
899 because we cannot remove the breakpoints in the inferior process
900 until after the `wait' in `wait_for_inferior'. */
901 static int singlestep_breakpoints_inserted_p = 0;
902
903 /* The thread we inserted single-step breakpoints for. */
904 static ptid_t singlestep_ptid;
905
906 /* PC when we started this single-step. */
907 static CORE_ADDR singlestep_pc;
908
909 /* If another thread hit the singlestep breakpoint, we save the original
910 thread here so that we can resume single-stepping it later. */
911 static ptid_t saved_singlestep_ptid;
912 static int stepping_past_singlestep_breakpoint;
913
914 /* If not equal to null_ptid, this means that after stepping over a
915 breakpoint is finished, we need to switch to deferred_step_ptid, and step it.
916
917 The use case is when one thread has hit a breakpoint, and then the user
918 has switched to another thread and issued 'step'. We need to step over
919 the breakpoint in the thread which hit it, but then continue
920 stepping the thread the user has selected. */
921 static ptid_t deferred_step_ptid;
922 \f
923 /* Displaced stepping. */
924
925 /* In non-stop debugging mode, we must take special care to manage
926 breakpoints properly; in particular, the traditional strategy for
927 stepping a thread past a breakpoint it has hit is unsuitable.
928 'Displaced stepping' is a tactic for stepping one thread past a
929 breakpoint it has hit while ensuring that other threads running
930 concurrently will hit the breakpoint as they should.
931
932 The traditional way to step a thread T off a breakpoint in a
933 multi-threaded program in all-stop mode is as follows:
934
935 a0) Initially, all threads are stopped, and breakpoints are not
936 inserted.
937 a1) We single-step T, leaving breakpoints uninserted.
938 a2) We insert breakpoints, and resume all threads.
939
940 In non-stop debugging, however, this strategy is unsuitable: we
941 don't want to have to stop all threads in the system in order to
942 continue or step T past a breakpoint. Instead, we use displaced
943 stepping:
944
945 n0) Initially, T is stopped, other threads are running, and
946 breakpoints are inserted.
947 n1) We copy the instruction "under" the breakpoint to a separate
948 location, outside the main code stream, making any adjustments
949 to the instruction, register, and memory state as directed by
950 T's architecture.
951 n2) We single-step T over the instruction at its new location.
952 n3) We adjust the resulting register and memory state as directed
953 by T's architecture. This includes resetting T's PC to point
954 back into the main instruction stream.
955 n4) We resume T.
956
957 This approach depends on the following gdbarch methods:
958
959 - gdbarch_max_insn_length and gdbarch_displaced_step_location
960 indicate where to copy the instruction, and how much space must
961 be reserved there. We use these in step n1.
962
963 - gdbarch_displaced_step_copy_insn copies an instruction to a new
964 address, and makes any necessary adjustments to the instruction,
965 register contents, and memory. We use this in step n1.
966
967 - gdbarch_displaced_step_fixup adjusts registers and memory after
968 we have successfully single-stepped the instruction, to yield the
969 same effect the instruction would have had if we had executed it
970 at its original address. We use this in step n3.
971
972 - gdbarch_displaced_step_free_closure provides cleanup.
973
974 The gdbarch_displaced_step_copy_insn and
975 gdbarch_displaced_step_fixup functions must be written so that
976 copying an instruction with gdbarch_displaced_step_copy_insn,
977 single-stepping across the copied instruction, and then applying
978 gdbarch_displaced_step_fixup should have the same effects on the
979 thread's memory and registers as stepping the instruction in place
980 would have. Exactly which responsibilities fall to the copy and
981 which fall to the fixup is up to the author of those functions.
982
983 See the comments in gdbarch.sh for details.
984
985 Note that displaced stepping and software single-step cannot
986 currently be used in combination, although with some care I think
987 they could be made to. Software single-step works by placing
988 breakpoints on all possible subsequent instructions; if the
989 displaced instruction is a PC-relative jump, those breakpoints
990 could fall in very strange places --- on pages that aren't
991 executable, or at addresses that are not proper instruction
992 boundaries. (We do generally let other threads run while we wait
993 to hit the software single-step breakpoint, and they might
994 encounter such a corrupted instruction.) One way to work around
995 this would be to have gdbarch_displaced_step_copy_insn fully
996 simulate the effect of PC-relative instructions (and return NULL)
997 on architectures that use software single-stepping.
998
999 In non-stop mode, we can have independent and simultaneous step
1000 requests, so more than one thread may need to simultaneously step
1001 over a breakpoint. The current implementation assumes there is
1002 only one scratch space per process. In this case, we have to
1003 serialize access to the scratch space. If thread A wants to step
1004 over a breakpoint, but we are currently waiting for some other
1005 thread to complete a displaced step, we leave thread A stopped and
1006 place it in the displaced_step_request_queue. Whenever a displaced
1007 step finishes, we pick the next thread in the queue and start a new
1008 displaced step operation on it. See displaced_step_prepare and
1009 displaced_step_fixup for details. */
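/* Illustrative sketch (not part of the original source): the n1-n4
   sequence above expressed as the gdbarch calls that
   displaced_step_prepare and displaced_step_fixup below actually make.
   Queueing, cleanups, error handling and the wait for the step to
   complete are all omitted; the sketch_* name is invented.  */
#if 0
static void
sketch_displaced_step_one_thread (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  /* Preserve whatever currently lives in the scratch area.  */
  read_memory (copy, saved, len);

  /* n1: copy the instruction out of line, letting the architecture
     adjust the instruction, registers and memory as needed.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (inferior_ptid, 1, TARGET_SIGNAL_0);
  /* ... wait for the single-step to report back ... */

  /* n3: fix up registers and memory so the net effect matches executing
     the instruction at its original address.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);

  /* Restore the scratch area and clean up; the thread (n4) can now be
     resumed normally.  */
  write_memory (copy, saved, len);
  xfree (saved);
  gdbarch_displaced_step_free_closure (gdbarch, closure);
}
#endif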
1010
1011 struct displaced_step_request
1012 {
1013 ptid_t ptid;
1014 struct displaced_step_request *next;
1015 };
1016
1017 /* Per-inferior displaced stepping state. */
1018 struct displaced_step_inferior_state
1019 {
1020 /* Pointer to next in linked list. */
1021 struct displaced_step_inferior_state *next;
1022
1023 /* The process this displaced step state refers to. */
1024 int pid;
1025
1026 /* A queue of pending displaced stepping requests. One entry per
1027 thread that needs to do a displaced step. */
1028 struct displaced_step_request *step_request_queue;
1029
1030 /* If this is not null_ptid, this is the thread carrying out a
1031 displaced single-step in process PID. This thread's state will
1032 require fixing up once it has completed its step. */
1033 ptid_t step_ptid;
1034
1035 /* The architecture the thread had when we stepped it. */
1036 struct gdbarch *step_gdbarch;
1037
1038 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1039 for post-step cleanup. */
1040 struct displaced_step_closure *step_closure;
1041
1042 /* The address of the original instruction, and the copy we
1043 made. */
1044 CORE_ADDR step_original, step_copy;
1045
1046 /* Saved contents of copy area. */
1047 gdb_byte *step_saved_copy;
1048 };
1049
1050 /* The list of states of processes involved in displaced stepping
1051 presently. */
1052 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1053
1054 /* Get the displaced stepping state of process PID. */
1055
1056 static struct displaced_step_inferior_state *
1057 get_displaced_stepping_state (int pid)
1058 {
1059 struct displaced_step_inferior_state *state;
1060
1061 for (state = displaced_step_inferior_states;
1062 state != NULL;
1063 state = state->next)
1064 if (state->pid == pid)
1065 return state;
1066
1067 return NULL;
1068 }
1069
1070 /* Add a new displaced stepping state for process PID to the displaced
1071 stepping state list, or return a pointer to the existing entry
1072 if one already exists. Never returns NULL. */
1073
1074 static struct displaced_step_inferior_state *
1075 add_displaced_stepping_state (int pid)
1076 {
1077 struct displaced_step_inferior_state *state;
1078
1079 for (state = displaced_step_inferior_states;
1080 state != NULL;
1081 state = state->next)
1082 if (state->pid == pid)
1083 return state;
1084
1085 state = xcalloc (1, sizeof (*state));
1086 state->pid = pid;
1087 state->next = displaced_step_inferior_states;
1088 displaced_step_inferior_states = state;
1089
1090 return state;
1091 }
1092
1093 /* If the inferior is performing a displaced step, and ADDR equals the
1094 starting address of the copy area, return the corresponding
1095 displaced_step_closure. Otherwise, return NULL. */
1096
1097 struct displaced_step_closure*
1098 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1099 {
1100 struct displaced_step_inferior_state *displaced
1101 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1102
1103 /* If checking the mode of displaced instruction in copy area. */
1104 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1105 && (displaced->step_copy == addr))
1106 return displaced->step_closure;
1107
1108 return NULL;
1109 }
1110
1111 /* Remove the displaced stepping state of process PID. */
1112
1113 static void
1114 remove_displaced_stepping_state (int pid)
1115 {
1116 struct displaced_step_inferior_state *it, **prev_next_p;
1117
1118 gdb_assert (pid != 0);
1119
1120 it = displaced_step_inferior_states;
1121 prev_next_p = &displaced_step_inferior_states;
1122 while (it)
1123 {
1124 if (it->pid == pid)
1125 {
1126 *prev_next_p = it->next;
1127 xfree (it);
1128 return;
1129 }
1130
1131 prev_next_p = &it->next;
1132 it = *prev_next_p;
1133 }
1134 }
1135
1136 static void
1137 infrun_inferior_exit (struct inferior *inf)
1138 {
1139 remove_displaced_stepping_state (inf->pid);
1140 }
1141
1142 /* Enum strings for "set|show displaced-stepping". */
1143
1144 static const char can_use_displaced_stepping_auto[] = "auto";
1145 static const char can_use_displaced_stepping_on[] = "on";
1146 static const char can_use_displaced_stepping_off[] = "off";
1147 static const char *can_use_displaced_stepping_enum[] =
1148 {
1149 can_use_displaced_stepping_auto,
1150 can_use_displaced_stepping_on,
1151 can_use_displaced_stepping_off,
1152 NULL,
1153 };
1154
1155 /* If ON, and the architecture supports it, GDB will use displaced
1156 stepping to step over breakpoints. If OFF, or if the architecture
1157 doesn't support it, GDB will instead use the traditional
1158 hold-and-step approach. If AUTO (which is the default), GDB will
1159 decide which technique to use to step over breakpoints depending on
1160 which of all-stop or non-stop mode is active --- displaced stepping
1161 in non-stop mode; hold-and-step in all-stop mode. */
1162
1163 static const char *can_use_displaced_stepping =
1164 can_use_displaced_stepping_auto;
1165
1166 static void
1167 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1168 struct cmd_list_element *c,
1169 const char *value)
1170 {
1171 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1172 fprintf_filtered (file,
1173 _("Debugger's willingness to use displaced stepping "
1174 "to step over breakpoints is %s (currently %s).\n"),
1175 value, non_stop ? "on" : "off");
1176 else
1177 fprintf_filtered (file,
1178 _("Debugger's willingness to use displaced stepping "
1179 "to step over breakpoints is %s.\n"), value);
1180 }
1181
1182 /* Return non-zero if displaced stepping can/should be used to step
1183 over breakpoints. */
1184
1185 static int
1186 use_displaced_stepping (struct gdbarch *gdbarch)
1187 {
1188 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1189 && non_stop)
1190 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1191 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1192 && !RECORD_IS_USED);
1193 }
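/* Usage sketch (not part of the original source):

       (gdb) set displaced-stepping on
       (gdb) show displaced-stepping
       Debugger's willingness to use displaced stepping to step over breakpoints is on.

   With "auto" (the default), use_displaced_stepping above enables it only
   when non-stop mode is active and the architecture supports it.  */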
1194
1195 /* Clean out any stray displaced stepping state. */
1196 static void
1197 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1198 {
1199 /* Indicate that there is no cleanup pending. */
1200 displaced->step_ptid = null_ptid;
1201
1202 if (displaced->step_closure)
1203 {
1204 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1205 displaced->step_closure);
1206 displaced->step_closure = NULL;
1207 }
1208 }
1209
1210 static void
1211 displaced_step_clear_cleanup (void *arg)
1212 {
1213 struct displaced_step_inferior_state *state = arg;
1214
1215 displaced_step_clear (state);
1216 }
1217
1218 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1219 void
1220 displaced_step_dump_bytes (struct ui_file *file,
1221 const gdb_byte *buf,
1222 size_t len)
1223 {
1224 int i;
1225
1226 for (i = 0; i < len; i++)
1227 fprintf_unfiltered (file, "%02x ", buf[i]);
1228 fputs_unfiltered ("\n", file);
1229 }
1230
1231 /* Prepare to single-step, using displaced stepping.
1232
1233 Note that we cannot use displaced stepping when we have a signal to
1234 deliver. If we have a signal to deliver and an instruction to step
1235 over, then after the step, there will be no indication from the
1236 target whether the thread entered a signal handler or ignored the
1237 signal and stepped over the instruction successfully --- both cases
1238 result in a simple SIGTRAP. In the first case we mustn't do a
1239 fixup, and in the second case we must --- but we can't tell which.
1240 Comments in the code for 'random signals' in handle_inferior_event
1241 explain how we handle this case instead.
1242
1243 Returns 1 if preparing was successful -- this thread is going to be
1244 stepped now; or 0 if displaced stepping this thread got queued. */
1245 static int
1246 displaced_step_prepare (ptid_t ptid)
1247 {
1248 struct cleanup *old_cleanups, *ignore_cleanups;
1249 struct regcache *regcache = get_thread_regcache (ptid);
1250 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1251 CORE_ADDR original, copy;
1252 ULONGEST len;
1253 struct displaced_step_closure *closure;
1254 struct displaced_step_inferior_state *displaced;
1255
1256 /* We should never reach this function if the architecture does not
1257 support displaced stepping. */
1258 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1259
1260 /* We have to displaced step one thread at a time, as we only have
1261 access to a single scratch space per inferior. */
1262
1263 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1264
1265 if (!ptid_equal (displaced->step_ptid, null_ptid))
1266 {
1267 /* Already waiting for a displaced step to finish. Defer this
1268 request and place it in the queue. */
1269 struct displaced_step_request *req, *new_req;
1270
1271 if (debug_displaced)
1272 fprintf_unfiltered (gdb_stdlog,
1273 "displaced: deferring step of %s\n",
1274 target_pid_to_str (ptid));
1275
1276 new_req = xmalloc (sizeof (*new_req));
1277 new_req->ptid = ptid;
1278 new_req->next = NULL;
1279
1280 if (displaced->step_request_queue)
1281 {
1282 for (req = displaced->step_request_queue;
1283 req && req->next;
1284 req = req->next)
1285 ;
1286 req->next = new_req;
1287 }
1288 else
1289 displaced->step_request_queue = new_req;
1290
1291 return 0;
1292 }
1293 else
1294 {
1295 if (debug_displaced)
1296 fprintf_unfiltered (gdb_stdlog,
1297 "displaced: stepping %s now\n",
1298 target_pid_to_str (ptid));
1299 }
1300
1301 displaced_step_clear (displaced);
1302
1303 old_cleanups = save_inferior_ptid ();
1304 inferior_ptid = ptid;
1305
1306 original = regcache_read_pc (regcache);
1307
1308 copy = gdbarch_displaced_step_location (gdbarch);
1309 len = gdbarch_max_insn_length (gdbarch);
1310
1311 /* Save the original contents of the copy area. */
1312 displaced->step_saved_copy = xmalloc (len);
1313 ignore_cleanups = make_cleanup (free_current_contents,
1314 &displaced->step_saved_copy);
1315 read_memory (copy, displaced->step_saved_copy, len);
1316 if (debug_displaced)
1317 {
1318 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1319 paddress (gdbarch, copy));
1320 displaced_step_dump_bytes (gdb_stdlog,
1321 displaced->step_saved_copy,
1322 len);
1323 };
1324
1325 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1326 original, copy, regcache);
1327
1328 /* We don't support the fully-simulated case at present. */
1329 gdb_assert (closure);
1330
1331 /* Save the information we need to fix things up if the step
1332 succeeds. */
1333 displaced->step_ptid = ptid;
1334 displaced->step_gdbarch = gdbarch;
1335 displaced->step_closure = closure;
1336 displaced->step_original = original;
1337 displaced->step_copy = copy;
1338
1339 make_cleanup (displaced_step_clear_cleanup, displaced);
1340
1341 /* Resume execution at the copy. */
1342 regcache_write_pc (regcache, copy);
1343
1344 discard_cleanups (ignore_cleanups);
1345
1346 do_cleanups (old_cleanups);
1347
1348 if (debug_displaced)
1349 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1350 paddress (gdbarch, copy));
1351
1352 return 1;
1353 }
1354
1355 static void
1356 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1357 const gdb_byte *myaddr, int len)
1358 {
1359 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1360
1361 inferior_ptid = ptid;
1362 write_memory (memaddr, myaddr, len);
1363 do_cleanups (ptid_cleanup);
1364 }
1365
1366 static void
1367 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1368 {
1369 struct cleanup *old_cleanups;
1370 struct displaced_step_inferior_state *displaced
1371 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1372
1373 /* Was any thread of this process doing a displaced step? */
1374 if (displaced == NULL)
1375 return;
1376
1377 /* Was this event for the pid we displaced? */
1378 if (ptid_equal (displaced->step_ptid, null_ptid)
1379 || ! ptid_equal (displaced->step_ptid, event_ptid))
1380 return;
1381
1382 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1383
1384 /* Restore the contents of the copy area. */
1385 {
1386 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1387
1388 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1389 displaced->step_saved_copy, len);
1390 if (debug_displaced)
1391 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1392 paddress (displaced->step_gdbarch,
1393 displaced->step_copy));
1394 }
1395
1396 /* Did the instruction complete successfully? */
1397 if (signal == TARGET_SIGNAL_TRAP)
1398 {
1399 /* Fix up the resulting state. */
1400 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1401 displaced->step_closure,
1402 displaced->step_original,
1403 displaced->step_copy,
1404 get_thread_regcache (displaced->step_ptid));
1405 }
1406 else
1407 {
1408 /* Since the instruction didn't complete, all we can do is
1409 relocate the PC. */
1410 struct regcache *regcache = get_thread_regcache (event_ptid);
1411 CORE_ADDR pc = regcache_read_pc (regcache);
1412
1413 pc = displaced->step_original + (pc - displaced->step_copy);
1414 regcache_write_pc (regcache, pc);
1415 }
1416
1417 do_cleanups (old_cleanups);
1418
1419 displaced->step_ptid = null_ptid;
1420
1421 /* Are there any pending displaced stepping requests? If so, run
1422 one now. Leave the state object around, since we're likely to
1423 need it again soon. */
1424 while (displaced->step_request_queue)
1425 {
1426 struct displaced_step_request *head;
1427 ptid_t ptid;
1428 struct regcache *regcache;
1429 struct gdbarch *gdbarch;
1430 CORE_ADDR actual_pc;
1431 struct address_space *aspace;
1432
1433 head = displaced->step_request_queue;
1434 ptid = head->ptid;
1435 displaced->step_request_queue = head->next;
1436 xfree (head);
1437
1438 context_switch (ptid);
1439
1440 regcache = get_thread_regcache (ptid);
1441 actual_pc = regcache_read_pc (regcache);
1442 aspace = get_regcache_aspace (regcache);
1443
1444 if (breakpoint_here_p (aspace, actual_pc))
1445 {
1446 if (debug_displaced)
1447 fprintf_unfiltered (gdb_stdlog,
1448 "displaced: stepping queued %s now\n",
1449 target_pid_to_str (ptid));
1450
1451 displaced_step_prepare (ptid);
1452
1453 gdbarch = get_regcache_arch (regcache);
1454
1455 if (debug_displaced)
1456 {
1457 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1458 gdb_byte buf[4];
1459
1460 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1461 paddress (gdbarch, actual_pc));
1462 read_memory (actual_pc, buf, sizeof (buf));
1463 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1464 }
1465
1466 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1467 displaced->step_closure))
1468 target_resume (ptid, 1, TARGET_SIGNAL_0);
1469 else
1470 target_resume (ptid, 0, TARGET_SIGNAL_0);
1471
1472 /* Done, we're stepping a thread. */
1473 break;
1474 }
1475 else
1476 {
1477 int step;
1478 struct thread_info *tp = inferior_thread ();
1479
1480 /* The breakpoint we were sitting under has since been
1481 removed. */
1482 tp->control.trap_expected = 0;
1483
1484 /* Go back to what we were trying to do. */
1485 step = currently_stepping (tp);
1486
1487 if (debug_displaced)
1488 fprintf_unfiltered (gdb_stdlog,
1489 "breakpoint is gone %s: step(%d)\n",
1490 target_pid_to_str (tp->ptid), step);
1491
1492 target_resume (ptid, step, TARGET_SIGNAL_0);
1493 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1494
1495 /* This request was discarded. See if there's any other
1496 thread waiting for its turn. */
1497 }
1498 }
1499 }
1500
1501 /* Update global variables holding ptids to hold NEW_PTID if they were
1502 holding OLD_PTID. */
1503 static void
1504 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1505 {
1506 struct displaced_step_request *it;
1507 struct displaced_step_inferior_state *displaced;
1508
1509 if (ptid_equal (inferior_ptid, old_ptid))
1510 inferior_ptid = new_ptid;
1511
1512 if (ptid_equal (singlestep_ptid, old_ptid))
1513 singlestep_ptid = new_ptid;
1514
1515 if (ptid_equal (deferred_step_ptid, old_ptid))
1516 deferred_step_ptid = new_ptid;
1517
1518 for (displaced = displaced_step_inferior_states;
1519 displaced;
1520 displaced = displaced->next)
1521 {
1522 if (ptid_equal (displaced->step_ptid, old_ptid))
1523 displaced->step_ptid = new_ptid;
1524
1525 for (it = displaced->step_request_queue; it; it = it->next)
1526 if (ptid_equal (it->ptid, old_ptid))
1527 it->ptid = new_ptid;
1528 }
1529 }
1530
1531 \f
1532 /* Resuming. */
1533
1534 /* Things to clean up if we QUIT out of resume (). */
1535 static void
1536 resume_cleanups (void *ignore)
1537 {
1538 normal_stop ();
1539 }
1540
1541 static const char schedlock_off[] = "off";
1542 static const char schedlock_on[] = "on";
1543 static const char schedlock_step[] = "step";
1544 static const char *scheduler_enums[] = {
1545 schedlock_off,
1546 schedlock_on,
1547 schedlock_step,
1548 NULL
1549 };
1550 static const char *scheduler_mode = schedlock_off;
1551 static void
1552 show_scheduler_mode (struct ui_file *file, int from_tty,
1553 struct cmd_list_element *c, const char *value)
1554 {
1555 fprintf_filtered (file,
1556 _("Mode for locking scheduler "
1557 "during execution is \"%s\".\n"),
1558 value);
1559 }
1560
1561 static void
1562 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1563 {
1564 if (!target_can_lock_scheduler)
1565 {
1566 scheduler_mode = schedlock_off;
1567 error (_("Target '%s' cannot support this command."), target_shortname);
1568 }
1569 }
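/* Usage sketch (not part of the original source):

       (gdb) set scheduler-locking step
       (gdb) show scheduler-locking
       Mode for locking scheduler during execution is "step".

   "step" resumes only the current thread while stepping but lets every
   thread run on "continue"; "on" restricts all resumption to the current
   thread; "off" (the default) resumes everything.  See
   user_visible_resume_ptid below.  */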
1570
1571 /* True if execution commands resume all threads of all processes by
1572 default; otherwise, resume only threads of the current inferior
1573 process. */
1574 int sched_multi = 0;
1575
1576 /* Try to set up software single stepping over the specified location.
1577 Return 1 if target_resume() should use hardware single step.
1578
1579 GDBARCH the current gdbarch.
1580 PC the location to step over. */
1581
1582 static int
1583 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1584 {
1585 int hw_step = 1;
1586
1587 if (execution_direction == EXEC_FORWARD
1588 && gdbarch_software_single_step_p (gdbarch)
1589 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1590 {
1591 hw_step = 0;
1592 /* Do not pull these breakpoints until after a `wait' in
1593 `wait_for_inferior'. */
1594 singlestep_breakpoints_inserted_p = 1;
1595 singlestep_ptid = inferior_ptid;
1596 singlestep_pc = pc;
1597 }
1598 return hw_step;
1599 }
1600
1601 /* Return a ptid representing the set of threads that we will proceed,
1602 from the perspective of the user/frontend. We may actually resume
1603 fewer threads at first, e.g., if a thread is stopped at a
1604 breakpoint that needs stepping-off, but that should not be visible
1605 to the user/frontend, and neither should the frontend/user be
1606 allowed to proceed any of the threads that happen to be stopped for
1607 internal run control handling, if a previous command wanted them
1608 resumed. */
1609
1610 ptid_t
1611 user_visible_resume_ptid (int step)
1612 {
1613 /* By default, resume all threads of all processes. */
1614 ptid_t resume_ptid = RESUME_ALL;
1615
1616 /* Maybe resume only all threads of the current process. */
1617 if (!sched_multi && target_supports_multi_process ())
1618 {
1619 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1620 }
1621
1622 /* Maybe resume a single thread after all. */
1623 if (non_stop)
1624 {
1625 /* With non-stop mode on, threads are always handled
1626 individually. */
1627 resume_ptid = inferior_ptid;
1628 }
1629 else if ((scheduler_mode == schedlock_on)
1630 || (scheduler_mode == schedlock_step
1631 && (step || singlestep_breakpoints_inserted_p)))
1632 {
1633 /* User-settable 'scheduler' mode requires solo thread resume. */
1634 resume_ptid = inferior_ptid;
1635 }
1636
1637 return resume_ptid;
1638 }
1639
1640 /* Resume the inferior, but allow a QUIT. This is useful if the user
1641 wants to interrupt some lengthy single-stepping operation
1642 (for child processes, the SIGINT goes to the inferior, and so
1643 we get a SIGINT random_signal, but for remote debugging and perhaps
1644 other targets, that's not true).
1645
1646 STEP nonzero if we should step (zero to continue instead).
1647 SIG is the signal to give the inferior (zero for none). */
1648 void
1649 resume (int step, enum target_signal sig)
1650 {
1651 int should_resume = 1;
1652 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1653 struct regcache *regcache = get_current_regcache ();
1654 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1655 struct thread_info *tp = inferior_thread ();
1656 CORE_ADDR pc = regcache_read_pc (regcache);
1657 struct address_space *aspace = get_regcache_aspace (regcache);
1658
1659 QUIT;
1660
1661 if (current_inferior ()->waiting_for_vfork_done)
1662 {
1663 /* Don't try to single-step a vfork parent that is waiting for
1664 the child to get out of the shared memory region (by exec'ing
1665 or exiting). This is particularly important on software
1666 single-step archs, as the child process would trip on the
1667 software single step breakpoint inserted for the parent
1668 process. Since the parent will not actually execute any
1669 instruction until the child is out of the shared region (such
1670 are vfork's semantics), it is safe to simply continue it.
1671 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1672 the parent, and tell it to `keep_going', which automatically
1673 re-sets it to stepping. */
1674 if (debug_infrun)
1675 fprintf_unfiltered (gdb_stdlog,
1676 "infrun: resume : clear step\n");
1677 step = 0;
1678 }
1679
1680 if (debug_infrun)
1681 fprintf_unfiltered (gdb_stdlog,
1682 "infrun: resume (step=%d, signal=%d), "
1683 "trap_expected=%d, current thread [%s] at %s\n",
1684 step, sig, tp->control.trap_expected,
1685 target_pid_to_str (inferior_ptid),
1686 paddress (gdbarch, pc));
1687
1688 /* Normally, by the time we reach `resume', the breakpoints are either
1689 removed or inserted, as appropriate. The exception is if we're sitting
1690 at a permanent breakpoint; we need to step over it, but permanent
1691 breakpoints can't be removed. So we have to test for it here. */
1692 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1693 {
1694 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1695 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1696 else
1697 error (_("\
1698 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1699 how to step past a permanent breakpoint on this architecture. Try using\n\
1700 a command like `return' or `jump' to continue execution."));
1701 }
1702
1703 /* If enabled, step over breakpoints by executing a copy of the
1704 instruction at a different address.
1705
1706 We can't use displaced stepping when we have a signal to deliver;
1707 the comments for displaced_step_prepare explain why. The
1708 comments in handle_inferior_event for dealing with 'random
1709 signals' explain what we do instead.
1710
1711 We can't use displaced stepping when we are waiting for a vfork_done
1712 event either; displaced stepping would break the vfork child the same
1713 way a software single-step breakpoint does. */
1714 if (use_displaced_stepping (gdbarch)
1715 && (tp->control.trap_expected
1716 || (step && gdbarch_software_single_step_p (gdbarch)))
1717 && sig == TARGET_SIGNAL_0
1718 && !current_inferior ()->waiting_for_vfork_done)
1719 {
1720 struct displaced_step_inferior_state *displaced;
1721
1722 if (!displaced_step_prepare (inferior_ptid))
1723 {
1724 /* Got placed in displaced stepping queue. Will be resumed
1725 later when all the currently queued displaced stepping
1726 requests finish. The thread is not executing at this point,
1727 and the call to set_executing will be made later. But we
1728 need to call set_running here, since from frontend point of view,
1729 the thread is running. */
1730 set_running (inferior_ptid, 1);
1731 discard_cleanups (old_cleanups);
1732 return;
1733 }
1734
1735 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1736 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1737 displaced->step_closure);
1738 }
1739
1740 /* Do we need to do it the hard way, w/temp breakpoints? */
1741 else if (step)
1742 step = maybe_software_singlestep (gdbarch, pc);
1743
1744 /* Currently, our software single-step implementation leads to different
1745 results than hardware single-stepping in one situation: when stepping
1746 into delivering a signal which has an associated signal handler,
1747 hardware single-step will stop at the first instruction of the handler,
1748 while software single-step will simply skip execution of the handler.
1749
1750 For now, this difference in behavior is accepted since there is no
1751 easy way to actually implement single-stepping into a signal handler
1752 without kernel support.
1753
1754 However, there is one scenario where this difference leads to follow-on
1755 problems: if we're stepping off a breakpoint by removing all breakpoints
1756 and then single-stepping. In this case, the software single-step
1757 behavior means that even if there is a *breakpoint* in the signal
1758 handler, GDB still would not stop.
1759
1760 Fortunately, we can at least fix this particular issue. We detect
1761 here the case where we are about to deliver a signal while software
1762 single-stepping with breakpoints removed. In this situation, we
1763 revert the decisions to remove all breakpoints and insert single-
1764 step breakpoints, and instead we install a step-resume breakpoint
1765 at the current address, deliver the signal without stepping, and
1766 once we arrive back at the step-resume breakpoint, actually step
1767 over the breakpoint we originally wanted to step over. */
1768 if (singlestep_breakpoints_inserted_p
1769 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1770 {
1771 /* If we have nested signals or a pending signal is delivered
1772 immediately after a handler returns, we might already have
1773 a step-resume breakpoint set on the earlier handler. We cannot
1774 set another step-resume breakpoint; just continue on until the
1775 original breakpoint is hit. */
1776 if (tp->control.step_resume_breakpoint == NULL)
1777 {
1778 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1779 tp->step_after_step_resume_breakpoint = 1;
1780 }
1781
1782 remove_single_step_breakpoints ();
1783 singlestep_breakpoints_inserted_p = 0;
1784
1785 insert_breakpoints ();
1786 tp->control.trap_expected = 0;
1787 }
1788
1789 if (should_resume)
1790 {
1791 ptid_t resume_ptid;
1792
1793 /* If STEP is set, it's a request to use hardware stepping
1794 facilities. But in that case, we should never
1795 use singlestep breakpoint. */
1796 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1797
1798 /* Decide the set of threads to ask the target to resume. Start
1799 by assuming everything will be resumed, then narrow the set
1800 by applying increasingly restrictive conditions. */
1801 resume_ptid = user_visible_resume_ptid (step);
1802
1803 /* Maybe resume a single thread after all. */
1804 if (singlestep_breakpoints_inserted_p
1805 && stepping_past_singlestep_breakpoint)
1806 {
1807 /* The situation here is as follows. In thread T1 we wanted to
1808 single-step. Lacking hardware single-stepping, we've set a
1809 breakpoint at the PC of the next instruction -- call it P. After
1810 resuming, we've hit that breakpoint in thread T2. Now we've
1811 removed the original breakpoint, inserted a breakpoint at P+1,
1812 and are trying to step to advance T2 past that breakpoint.
1813 We need to step only T2: if T1 is allowed to run freely, it can
1814 run past P, and if other threads are allowed to run, they can
1815 hit the breakpoint at P+1, and nested hits of single-step
1816 breakpoints are not something we'd want -- that's complicated
1817 to support, and has no value. */
1818 resume_ptid = inferior_ptid;
1819 }
1820 else if ((step || singlestep_breakpoints_inserted_p)
1821 && tp->control.trap_expected)
1822 {
1823 /* We're allowing a thread to run past a breakpoint it has
1824 hit, by single-stepping the thread with the breakpoint
1825 removed. In that case, we need to single-step only this
1826 thread, and keep others stopped, as they can miss this
1827 breakpoint if allowed to run.
1828
1829 The current code actually removes all breakpoints when
1830 doing this, not just the one being stepped over, so if we
1831 let other threads run, we can actually miss any
1832 breakpoint, not just the one at PC. */
1833 resume_ptid = inferior_ptid;
1834 }
1835
1836 if (gdbarch_cannot_step_breakpoint (gdbarch))
1837 {
1838 /* Most targets can step a breakpoint instruction, thus
1839 executing it normally. But if this one cannot, just
1840 continue and we will hit it anyway. */
1841 if (step && breakpoint_inserted_here_p (aspace, pc))
1842 step = 0;
1843 }
1844
1845 if (debug_displaced
1846 && use_displaced_stepping (gdbarch)
1847 && tp->control.trap_expected)
1848 {
1849 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1850 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1851 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1852 gdb_byte buf[4];
1853
1854 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1855 paddress (resume_gdbarch, actual_pc));
1856 read_memory (actual_pc, buf, sizeof (buf));
1857 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1858 }
1859
1860 /* Install inferior's terminal modes. */
1861 target_terminal_inferior ();
1862
1863 /* Avoid confusing the next resume, if the next stop/resume
1864 happens to apply to another thread. */
1865 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1866
1867 /* Advise target which signals may be handled silently. If we have
1868 removed breakpoints because we are stepping over one (which can
1869 happen only if we are not using displaced stepping), we need to
1870 receive all signals to avoid accidentally skipping a breakpoint
1871 during execution of a signal handler. */
1872 if ((step || singlestep_breakpoints_inserted_p)
1873 && tp->control.trap_expected
1874 && !use_displaced_stepping (gdbarch))
1875 target_pass_signals (0, NULL);
1876 else
1877 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1878
1879 target_resume (resume_ptid, step, sig);
1880 }
1881
1882 discard_cleanups (old_cleanups);
1883 }
1884 \f
1885 /* Proceeding. */
1886
1887 /* Clear out all variables saying what to do when inferior is continued.
1888 First do this, then set the ones you want, then call `proceed'. */
1889
1890 static void
1891 clear_proceed_status_thread (struct thread_info *tp)
1892 {
1893 if (debug_infrun)
1894 fprintf_unfiltered (gdb_stdlog,
1895 "infrun: clear_proceed_status_thread (%s)\n",
1896 target_pid_to_str (tp->ptid));
1897
1898 tp->control.trap_expected = 0;
1899 tp->control.step_range_start = 0;
1900 tp->control.step_range_end = 0;
1901 tp->control.step_frame_id = null_frame_id;
1902 tp->control.step_stack_frame_id = null_frame_id;
1903 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1904 tp->stop_requested = 0;
1905
1906 tp->control.stop_step = 0;
1907
1908 tp->control.proceed_to_finish = 0;
1909
1910 /* Discard any remaining commands or status from previous stop. */
1911 bpstat_clear (&tp->control.stop_bpstat);
1912 }
1913
1914 static int
1915 clear_proceed_status_callback (struct thread_info *tp, void *data)
1916 {
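/* Threads that have already exited have no per-thread state left to
clear; skip them. */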
1917 if (is_exited (tp->ptid))
1918 return 0;
1919
1920 clear_proceed_status_thread (tp);
1921 return 0;
1922 }
1923
1924 void
1925 clear_proceed_status (void)
1926 {
1927 if (!non_stop)
1928 {
1929 /* In all-stop mode, delete the per-thread status of all
1930 threads, even if inferior_ptid is null_ptid, there may be
1931 threads on the list. E.g., we may be launching a new
1932 process, while selecting the executable. */
1933 iterate_over_threads (clear_proceed_status_callback, NULL);
1934 }
1935
1936 if (!ptid_equal (inferior_ptid, null_ptid))
1937 {
1938 struct inferior *inferior;
1939
1940 if (non_stop)
1941 {
1942 /* If in non-stop mode, only delete the per-thread status of
1943 the current thread. */
1944 clear_proceed_status_thread (inferior_thread ());
1945 }
1946
1947 inferior = current_inferior ();
1948 inferior->control.stop_soon = NO_STOP_QUIETLY;
1949 }
1950
1951 stop_after_trap = 0;
1952
1953 observer_notify_about_to_proceed ();
1954
1955 if (stop_registers)
1956 {
1957 regcache_xfree (stop_registers);
1958 stop_registers = NULL;
1959 }
1960 }
1961
1962 /* Check the current thread against the thread that reported the most recent
1963 event. If a step-over is required, return TRUE and switch the current
1964 thread to the thread that reported the event. Otherwise return FALSE.
1965
1966 This should be suitable for any targets that support threads. */
1967
1968 static int
1969 prepare_to_proceed (int step)
1970 {
1971 ptid_t wait_ptid;
1972 struct target_waitstatus wait_status;
1973 int schedlock_enabled;
1974
1975 /* With non-stop mode on, threads are always handled individually. */
1976 gdb_assert (! non_stop);
1977
1978 /* Get the last target status returned by target_wait(). */
1979 get_last_target_status (&wait_ptid, &wait_status);
1980
1981 /* Make sure we were stopped at a breakpoint. */
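/* On some targets breakpoint instructions raise SIGILL, SIGSEGV or
SIGEMT rather than SIGTRAP (handle_inferior_event remaps those back to
SIGTRAP), so accept those signals here as well. */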
1982 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1983 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1984 && wait_status.value.sig != TARGET_SIGNAL_ILL
1985 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1986 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1987 {
1988 return 0;
1989 }
1990
1991 schedlock_enabled = (scheduler_mode == schedlock_on
1992 || (scheduler_mode == schedlock_step
1993 && step));
1994
1995 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1996 if (schedlock_enabled)
1997 return 0;
1998
1999 /* Don't switch over if we're about to resume some other process
2000 other than WAIT_PTID's, and schedule-multiple is off. */
2001 if (!sched_multi
2002 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2003 return 0;
2004
2005 /* Consider switching over to WAIT_PTID's thread. */
2006 if (!ptid_equal (wait_ptid, minus_one_ptid)
2007 && !ptid_equal (inferior_ptid, wait_ptid))
2008 {
2009 struct regcache *regcache = get_thread_regcache (wait_ptid);
2010
2011 if (breakpoint_here_p (get_regcache_aspace (regcache),
2012 regcache_read_pc (regcache)))
2013 {
2014 /* If stepping, remember current thread to switch back to. */
2015 if (step)
2016 deferred_step_ptid = inferior_ptid;
2017
2018 /* Switch to the WAIT_PTID thread (the one that reported the event). */
2019 switch_to_thread (wait_ptid);
2020
2021 if (debug_infrun)
2022 fprintf_unfiltered (gdb_stdlog,
2023 "infrun: prepare_to_proceed (step=%d), "
2024 "switched to [%s]\n",
2025 step, target_pid_to_str (inferior_ptid));
2026
2027 /* We return 1 to indicate that there is a breakpoint here,
2028 so we need to step over it before continuing to avoid
2029 hitting it straight away. */
2030 return 1;
2031 }
2032 }
2033
2034 return 0;
2035 }
2036
2037 /* Basic routine for continuing the program in various fashions.
2038
2039 ADDR is the address to resume at, or -1 for resume where stopped.
2040 SIGGNAL is the signal to give it, or 0 for none,
2041 or -1 for act according to how it stopped.
2042 STEP is nonzero if we should trap after one instruction;
2043 -1 means additionally return after that and print nothing.
2044 You should probably set various step_... variables
2045 before calling here, if you are stepping.
2046
2047 You should call clear_proceed_status before calling proceed. */
2048
2049 void
2050 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2051 {
2052 struct regcache *regcache;
2053 struct gdbarch *gdbarch;
2054 struct thread_info *tp;
2055 CORE_ADDR pc;
2056 struct address_space *aspace;
2057 int oneproc = 0;
2058
2059 /* If we're stopped at a fork/vfork, follow the branch set by the
2060 "set follow-fork-mode" command; otherwise, we'll just proceed
2061 resuming the current thread. */
2062 if (!follow_fork ())
2063 {
2064 /* The target for some reason decided not to resume. */
2065 normal_stop ();
2066 if (target_can_async_p ())
2067 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2068 return;
2069 }
2070
2071 /* We'll update this if & when we switch to a new thread. */
2072 previous_inferior_ptid = inferior_ptid;
2073
2074 regcache = get_current_regcache ();
2075 gdbarch = get_regcache_arch (regcache);
2076 aspace = get_regcache_aspace (regcache);
2077 pc = regcache_read_pc (regcache);
2078
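/* A positive STEP means a stepping command: remember the function we
start in. A negative STEP means trap after one instruction and then
return and print nothing, as described in the comment above. */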
2079 if (step > 0)
2080 step_start_function = find_pc_function (pc);
2081 if (step < 0)
2082 stop_after_trap = 1;
2083
2084 if (addr == (CORE_ADDR) -1)
2085 {
2086 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2087 && execution_direction != EXEC_REVERSE)
2088 /* There is a breakpoint at the address we will resume at,
2089 step one instruction before inserting breakpoints so that
2090 we do not stop right away (and report a second hit at this
2091 breakpoint).
2092
2093 Note, we don't do this in reverse, because we won't
2094 actually be executing the breakpoint insn anyway.
2095 We'll be (un-)executing the previous instruction. */
2096
2097 oneproc = 1;
2098 else if (gdbarch_single_step_through_delay_p (gdbarch)
2099 && gdbarch_single_step_through_delay (gdbarch,
2100 get_current_frame ()))
2101 /* We stepped onto an instruction that needs to be stepped
2102 again before re-inserting the breakpoint; do so. */
2103 oneproc = 1;
2104 }
2105 else
2106 {
2107 regcache_write_pc (regcache, addr);
2108 }
2109
2110 if (debug_infrun)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2113 paddress (gdbarch, addr), siggnal, step);
2114
2115 if (non_stop)
2116 /* In non-stop, each thread is handled individually. The context
2117 must already be set to the right thread here. */
2118 ;
2119 else
2120 {
2121 /* In a multi-threaded task we may select another thread and
2122 then continue or step.
2123
2124 But if the old thread was stopped at a breakpoint, it will
2125 immediately cause another breakpoint stop without any
2126 execution (i.e. it will report a breakpoint hit incorrectly).
2127 So we must step over it first.
2128
2129 prepare_to_proceed checks the current thread against the
2130 thread that reported the most recent event. If a step-over
2131 is required it returns TRUE and sets the current thread to
2132 the old thread. */
2133 if (prepare_to_proceed (step))
2134 oneproc = 1;
2135 }
2136
2137 /* prepare_to_proceed may change the current thread. */
2138 tp = inferior_thread ();
2139
2140 if (oneproc)
2141 {
2142 tp->control.trap_expected = 1;
2143 /* If displaced stepping is enabled, we can step over the
2144 breakpoint without hitting it, so leave all breakpoints
2145 inserted. Otherwise we need to disable all breakpoints, step
2146 one instruction, and then re-add them when that step is
2147 finished. */
2148 if (!use_displaced_stepping (gdbarch))
2149 remove_breakpoints ();
2150 }
2151
2152 /* We can insert breakpoints if we're not trying to step over one,
2153 or if we are stepping over one but we're using displaced stepping
2154 to do so. */
2155 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2156 insert_breakpoints ();
2157
2158 if (!non_stop)
2159 {
2160 /* Pass the last stop signal to the thread we're resuming,
2161 irrespective of whether the current thread is the thread that
2162 got the last event or not. This was historically GDB's
2163 behaviour before keeping a stop_signal per thread. */
2164
2165 struct thread_info *last_thread;
2166 ptid_t last_ptid;
2167 struct target_waitstatus last_status;
2168
2169 get_last_target_status (&last_ptid, &last_status);
2170 if (!ptid_equal (inferior_ptid, last_ptid)
2171 && !ptid_equal (last_ptid, null_ptid)
2172 && !ptid_equal (last_ptid, minus_one_ptid))
2173 {
2174 last_thread = find_thread_ptid (last_ptid);
2175 if (last_thread)
2176 {
2177 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2178 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2179 }
2180 }
2181 }
2182
2183 if (siggnal != TARGET_SIGNAL_DEFAULT)
2184 tp->suspend.stop_signal = siggnal;
2185 /* If this signal should not be seen by program,
2186 give it zero. Used for debugging signals. */
2187 else if (!signal_program[tp->suspend.stop_signal])
2188 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2189
2190 annotate_starting ();
2191
2192 /* Make sure that output from GDB appears before output from the
2193 inferior. */
2194 gdb_flush (gdb_stdout);
2195
2196 /* Refresh prev_pc value just prior to resuming. This used to be
2197 done in stop_stepping; however, setting prev_pc there did not handle
2198 scenarios such as inferior function calls or returning from
2199 a function via the return command. In those cases, the prev_pc
2200 value was not set properly for subsequent commands. The prev_pc value
2201 is used to initialize the starting line number in the ecs. With an
2202 invalid value, the gdb next command ends up stopping at the position
2203 represented by the next line table entry past our start position.
2204 On platforms that generate one line table entry per line, this
2205 is not a problem. However, on the ia64, the compiler generates
2206 extraneous line table entries that do not increase the line number.
2207 When we issue the gdb next command on the ia64 after an inferior call
2208 or a return command, we often end up a few instructions forward, still
2209 within the original line we started in.
2210
2211 An attempt was made to refresh the prev_pc at the same time the
2212 execution_control_state is initialized (for instance, just before
2213 waiting for an inferior event). But this approach did not work
2214 because of platforms that use ptrace, where the pc register cannot
2215 be read unless the inferior is stopped. At that point, we are not
2216 guaranteed the inferior is stopped and so the regcache_read_pc() call
2217 can fail. Setting the prev_pc value here ensures the value is updated
2218 correctly when the inferior is stopped. */
2219 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2220
2221 /* Fill in with reasonable starting values. */
2222 init_thread_stepping_state (tp);
2223
2224 /* Reset to normal state. */
2225 init_infwait_state ();
2226
2227 /* Resume inferior. */
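/* Single-step if we are stepping over a breakpoint (ONEPROC), if the
user asked to step, or if bpstat_should_step reports that a watchpoint
requires single-stepping. */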
2228 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2229
2230 /* Wait for it to stop (if not standalone)
2231 and in any case decode why it stopped, and act accordingly. */
2232 /* Do this only if we are not using the event loop, or if the target
2233 does not support asynchronous execution. */
2234 if (!target_can_async_p ())
2235 {
2236 wait_for_inferior ();
2237 normal_stop ();
2238 }
2239 }
2240 \f
2241
2242 /* Start remote-debugging of a machine over a serial link. */
2243
2244 void
2245 start_remote (int from_tty)
2246 {
2247 struct inferior *inferior;
2248
2249 inferior = current_inferior ();
2250 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2251
2252 /* Always go on waiting for the target, regardless of the mode. */
2253 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2254 indicate to wait_for_inferior that a target should timeout if
2255 nothing is returned (instead of just blocking). Because of this,
2256 targets expecting an immediate response need to, internally, set
2257 things up so that the target_wait() is forced to eventually
2258 timeout. */
2259 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2260 differentiate to its caller what the state of the target is after
2261 the initial open has been performed. Here we're assuming that
2262 the target has stopped. It should be possible to eventually have
2263 target_open() return to the caller an indication that the target
2264 is currently running and GDB state should be set to the same as
2265 for an async run. */
2266 wait_for_inferior ();
2267
2268 /* Now that the inferior has stopped, do any bookkeeping like
2269 loading shared libraries. We want to do this before normal_stop,
2270 so that the displayed frame is up to date. */
2271 post_create_inferior (&current_target, from_tty);
2272
2273 normal_stop ();
2274 }
2275
2276 /* Initialize static vars when a new inferior begins. */
2277
2278 void
2279 init_wait_for_inferior (void)
2280 {
2281 /* These are meaningless until the first time through wait_for_inferior. */
2282
2283 breakpoint_init_inferior (inf_starting);
2284
2285 clear_proceed_status ();
2286
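/* Forget any thread-hop state from software single-step breakpoints
and any deferred step thread left over from a previous run. */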
2287 stepping_past_singlestep_breakpoint = 0;
2288 deferred_step_ptid = null_ptid;
2289
2290 target_last_wait_ptid = minus_one_ptid;
2291
2292 previous_inferior_ptid = inferior_ptid;
2293 init_infwait_state ();
2294
2295 /* Discard any skipped inlined frames. */
2296 clear_inline_frame_state (minus_one_ptid);
2297 }
2298
2299 \f
2300 /* This enum encodes possible reasons for doing a target_wait, so that
2301 wfi can call target_wait in one place. (Ultimately the call will be
2302 moved out of the infinite loop entirely.) */
2303
2304 enum infwait_states
2305 {
2306 infwait_normal_state,
2307 infwait_thread_hop_state,
2308 infwait_step_watch_state,
2309 infwait_nonstep_watch_state
2310 };
2311
2312 /* The PTID we'll do a target_wait on. */
2313 ptid_t waiton_ptid;
2314
2315 /* Current inferior wait state. */
2316 enum infwait_states infwait_state;
2317
2318 /* Data to be passed around while handling an event. This data is
2319 discarded between events. */
2320 struct execution_control_state
2321 {
2322 ptid_t ptid;
2323 /* The thread that got the event, if this was a thread event; NULL
2324 otherwise. */
2325 struct thread_info *event_thread;
2326
2327 struct target_waitstatus ws;
2328 int random_signal;
2329 int stop_func_filled_in;
2330 CORE_ADDR stop_func_start;
2331 CORE_ADDR stop_func_end;
2332 char *stop_func_name;
2333 int new_thread_event;
2334 int wait_some_more;
2335 };
2336
2337 static void handle_inferior_event (struct execution_control_state *ecs);
2338
2339 static void handle_step_into_function (struct gdbarch *gdbarch,
2340 struct execution_control_state *ecs);
2341 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2342 struct execution_control_state *ecs);
2343 static void check_exception_resume (struct execution_control_state *,
2344 struct frame_info *, struct symbol *);
2345
2346 static void stop_stepping (struct execution_control_state *ecs);
2347 static void prepare_to_wait (struct execution_control_state *ecs);
2348 static void keep_going (struct execution_control_state *ecs);
2349
2350 /* Callback for iterate_over_threads. If the thread is stopped, but
2351 the user/frontend doesn't know about that yet, go through
2352 normal_stop, as if the thread had just stopped now. ARG points at
2353 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2354 ptid_is_pid(PTID) is true, applies to all threads of the process
2355 pointed at by PTID. Otherwise, apply only to the thread pointed at by
2356 PTID. */
2357
2358 static int
2359 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2360 {
2361 ptid_t ptid = * (ptid_t *) arg;
2362
2363 if ((ptid_equal (info->ptid, ptid)
2364 || ptid_equal (minus_one_ptid, ptid)
2365 || (ptid_is_pid (ptid)
2366 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2367 && is_running (info->ptid)
2368 && !is_executing (info->ptid))
2369 {
2370 struct cleanup *old_chain;
2371 struct execution_control_state ecss;
2372 struct execution_control_state *ecs = &ecss;
2373
2374 memset (ecs, 0, sizeof (*ecs));
2375
2376 old_chain = make_cleanup_restore_current_thread ();
2377
2378 switch_to_thread (info->ptid);
2379
2380 /* Go through handle_inferior_event/normal_stop, so we always
2381 have consistent output as if the stop event had been
2382 reported. */
2383 ecs->ptid = info->ptid;
2384 ecs->event_thread = find_thread_ptid (info->ptid);
2385 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2386 ecs->ws.value.sig = TARGET_SIGNAL_0;
2387
2388 handle_inferior_event (ecs);
2389
2390 if (!ecs->wait_some_more)
2391 {
2392 struct thread_info *tp;
2393
2394 normal_stop ();
2395
2396 /* Finish off the continuations. */
2397 tp = inferior_thread ();
2398 do_all_intermediate_continuations_thread (tp, 1);
2399 do_all_continuations_thread (tp, 1);
2400 }
2401
2402 do_cleanups (old_chain);
2403 }
2404
2405 return 0;
2406 }
2407
2408 /* This function is attached as a "thread_stop_requested" observer.
2409 Cleanup local state that assumed the PTID was to be resumed, and
2410 report the stop to the frontend. */
2411
2412 static void
2413 infrun_thread_stop_requested (ptid_t ptid)
2414 {
2415 struct displaced_step_inferior_state *displaced;
2416
2417 /* PTID was requested to stop. Remove it from the displaced
2418 stepping queue, so we don't try to resume it automatically. */
2419
2420 for (displaced = displaced_step_inferior_states;
2421 displaced;
2422 displaced = displaced->next)
2423 {
2424 struct displaced_step_request *it, **prev_next_p;
2425
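/* Walk this inferior's queue of pending displaced-stepping requests,
unlinking and freeing every entry that matches PTID. */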
2426 it = displaced->step_request_queue;
2427 prev_next_p = &displaced->step_request_queue;
2428 while (it)
2429 {
2430 if (ptid_match (it->ptid, ptid))
2431 {
2432 *prev_next_p = it->next;
2433 it->next = NULL;
2434 xfree (it);
2435 }
2436 else
2437 {
2438 prev_next_p = &it->next;
2439 }
2440
2441 it = *prev_next_p;
2442 }
2443 }
2444
2445 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2446 }
2447
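/* Called when a thread exits. If the cached last-wait ptid refers to
that thread, invalidate it. */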
2448 static void
2449 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2450 {
2451 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2452 nullify_last_target_wait_ptid ();
2453 }
2454
2455 /* Callback for iterate_over_threads. */
2456
2457 static int
2458 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2459 {
2460 if (is_exited (info->ptid))
2461 return 0;
2462
2463 delete_step_resume_breakpoint (info);
2464 delete_exception_resume_breakpoint (info);
2465 return 0;
2466 }
2467
2468 /* In all-stop, delete the step resume breakpoint of any thread that
2469 had one. In non-stop, delete the step resume breakpoint of the
2470 thread that just stopped. */
2471
2472 static void
2473 delete_step_thread_step_resume_breakpoint (void)
2474 {
2475 if (!target_has_execution
2476 || ptid_equal (inferior_ptid, null_ptid))
2477 /* If the inferior has exited, we have already deleted the step
2478 resume breakpoints out of GDB's lists. */
2479 return;
2480
2481 if (non_stop)
2482 {
2483 /* If in non-stop mode, only delete the step-resume or
2484 longjmp-resume breakpoint of the thread that just stopped
2485 stepping. */
2486 struct thread_info *tp = inferior_thread ();
2487
2488 delete_step_resume_breakpoint (tp);
2489 delete_exception_resume_breakpoint (tp);
2490 }
2491 else
2492 /* In all-stop mode, delete all step-resume and longjmp-resume
2493 breakpoints of any thread that had them. */
2494 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2495 }
2496
2497 /* A cleanup wrapper. */
2498
2499 static void
2500 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2501 {
2502 delete_step_thread_step_resume_breakpoint ();
2503 }
2504
2505 /* Pretty print the results of target_wait, for debugging purposes. */
2506
2507 static void
2508 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2509 const struct target_waitstatus *ws)
2510 {
2511 char *status_string = target_waitstatus_to_string (ws);
2512 struct ui_file *tmp_stream = mem_fileopen ();
2513 char *text;
2514
2515 /* The text is split over several lines because it was getting too long.
2516 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2517 output as a unit; we want only one timestamp printed if debug_timestamp
2518 is set. */
2519
2520 fprintf_unfiltered (tmp_stream,
2521 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2522 if (PIDGET (waiton_ptid) != -1)
2523 fprintf_unfiltered (tmp_stream,
2524 " [%s]", target_pid_to_str (waiton_ptid));
2525 fprintf_unfiltered (tmp_stream, ", status) =\n");
2526 fprintf_unfiltered (tmp_stream,
2527 "infrun: %d [%s],\n",
2528 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2529 fprintf_unfiltered (tmp_stream,
2530 "infrun: %s\n",
2531 status_string);
2532
2533 text = ui_file_xstrdup (tmp_stream, NULL);
2534
2535 /* This uses %s in part to handle %'s in the text, but also to avoid
2536 a gcc error: the format attribute requires a string literal. */
2537 fprintf_unfiltered (gdb_stdlog, "%s", text);
2538
2539 xfree (status_string);
2540 xfree (text);
2541 ui_file_delete (tmp_stream);
2542 }
2543
2544 /* Prepare and stabilize the inferior for detaching. E.g.,
2545 detaching while a thread is displaced stepping is a recipe for
2546 crashing it, as nothing would readjust the PC out of the scratch
2547 pad. */
2548
2549 void
2550 prepare_for_detach (void)
2551 {
2552 struct inferior *inf = current_inferior ();
2553 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2554 struct cleanup *old_chain_1;
2555 struct displaced_step_inferior_state *displaced;
2556
2557 displaced = get_displaced_stepping_state (inf->pid);
2558
2559 /* Is any thread of this process displaced stepping? If not,
2560 there's nothing else to do. */
2561 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2562 return;
2563
2564 if (debug_infrun)
2565 fprintf_unfiltered (gdb_stdlog,
2566 "displaced-stepping in-process while detaching");
2567
2568 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2569 inf->detaching = 1;
2570
2571 while (!ptid_equal (displaced->step_ptid, null_ptid))
2572 {
2573 struct cleanup *old_chain_2;
2574 struct execution_control_state ecss;
2575 struct execution_control_state *ecs;
2576
2577 ecs = &ecss;
2578 memset (ecs, 0, sizeof (*ecs));
2579
2580 overlay_cache_invalid = 1;
2581
2582 /* We have to invalidate the registers BEFORE calling
2583 target_wait because they can be loaded from the target while
2584 in target_wait. This makes remote debugging a bit more
2585 efficient for those targets that provide critical registers
2586 as part of their normal status mechanism. */
2587
2588 registers_changed ();
2589
2590 if (deprecated_target_wait_hook)
2591 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2592 else
2593 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2594
2595 if (debug_infrun)
2596 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2597
2598 /* If an error happens while handling the event, propagate GDB's
2599 knowledge of the executing state to the frontend/user running
2600 state. */
2601 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2602 &minus_one_ptid);
2603
2604 /* In non-stop mode, each thread is handled individually.
2605 Switch early, so the global state is set correctly for this
2606 thread. */
2607 if (non_stop
2608 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2609 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2610 context_switch (ecs->ptid);
2611
2612 /* Now figure out what to do with the result. */
2613 handle_inferior_event (ecs);
2614
2615 /* No error, don't finish the state yet. */
2616 discard_cleanups (old_chain_2);
2617
2618 /* Breakpoints and watchpoints are not installed on the target
2619 at this point, and signals are passed directly to the
2620 inferior, so this must mean the process is gone. */
2621 if (!ecs->wait_some_more)
2622 {
2623 discard_cleanups (old_chain_1);
2624 error (_("Program exited while detaching"));
2625 }
2626 }
2627
2628 discard_cleanups (old_chain_1);
2629 }
2630
2631 /* Wait for control to return from inferior to debugger.
2632
2633 If inferior gets a signal, we may decide to start it up again
2634 instead of returning. That is why there is a loop in this function.
2635 When this function actually returns it means the inferior
2636 should be left stopped and GDB should read more commands. */
2637
2638 void
2639 wait_for_inferior (void)
2640 {
2641 struct cleanup *old_cleanups;
2642 struct execution_control_state ecss;
2643 struct execution_control_state *ecs;
2644
2645 if (debug_infrun)
2646 fprintf_unfiltered
2647 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2648
2649 old_cleanups =
2650 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2651
2652 ecs = &ecss;
2653 memset (ecs, 0, sizeof (*ecs));
2654
2655 while (1)
2656 {
2657 struct cleanup *old_chain;
2658
2659 /* We have to invalidate the registers BEFORE calling target_wait
2660 because they can be loaded from the target while in target_wait.
2661 This makes remote debugging a bit more efficient for those
2662 targets that provide critical registers as part of their normal
2663 status mechanism. */
2664
2665 overlay_cache_invalid = 1;
2666 registers_changed ();
2667
2668 if (deprecated_target_wait_hook)
2669 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2670 else
2671 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2672
2673 if (debug_infrun)
2674 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2675
2676 /* If an error happens while handling the event, propagate GDB's
2677 knowledge of the executing state to the frontend/user running
2678 state. */
2679 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2680
2681 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2682 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2683 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2684
2685 /* Now figure out what to do with the result. */
2686 handle_inferior_event (ecs);
2687
2688 /* No error, don't finish the state yet. */
2689 discard_cleanups (old_chain);
2690
2691 if (!ecs->wait_some_more)
2692 break;
2693 }
2694
2695 do_cleanups (old_cleanups);
2696 }
2697
2698 /* Asynchronous version of wait_for_inferior. It is called by the
2699 event loop whenever a change of state is detected on the file
2700 descriptor corresponding to the target. It can be called more than
2701 once to complete a single execution command. In such cases we need
2702 to keep the state in a global variable ECSS. If it is the last time
2703 that this function is called for a single execution command, then
2704 report to the user that the inferior has stopped, and do the
2705 necessary cleanups. */
2706
2707 void
2708 fetch_inferior_event (void *client_data)
2709 {
2710 struct execution_control_state ecss;
2711 struct execution_control_state *ecs = &ecss;
2712 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2713 struct cleanup *ts_old_chain;
2714 int was_sync = sync_execution;
2715
2716 memset (ecs, 0, sizeof (*ecs));
2717
2718 /* We're handling a live event, so make sure we're doing live
2719 debugging. If we're looking at traceframes while the target is
2720 running, we're going to need to get back to that mode after
2721 handling the event. */
2722 if (non_stop)
2723 {
2724 make_cleanup_restore_current_traceframe ();
2725 set_current_traceframe (-1);
2726 }
2727
2728 if (non_stop)
2729 /* In non-stop mode, the user/frontend should not notice a thread
2730 switch due to internal events. Make sure we revert to the
2731 user-selected thread and frame after handling the event and
2732 running any breakpoint commands. */
2733 make_cleanup_restore_current_thread ();
2734
2735 /* We have to invalidate the registers BEFORE calling target_wait
2736 because they can be loaded from the target while in target_wait.
2737 This makes remote debugging a bit more efficient for those
2738 targets that provide critical registers as part of their normal
2739 status mechanism. */
2740
2741 overlay_cache_invalid = 1;
2742
2743 /* But don't do it if the current thread is already stopped (hence
2744 this is either a delayed event that will result in
2745 TARGET_WAITKIND_IGNORE, or it's an event for another thread (and
2746 we always clear the register and frame caches when the user
2747 switches threads anyway)). If we didn't do this, a spurious
2748 delayed event in all-stop mode would make the user lose the
2749 selected frame. */
2750 if (non_stop || is_executing (inferior_ptid))
2751 registers_changed ();
2752
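/* Temporarily adopt the execution direction reported by the target;
the cleanup restores the user's setting once the event is handled. */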
2753 make_cleanup_restore_integer (&execution_direction);
2754 execution_direction = target_execution_direction ();
2755
2756 if (deprecated_target_wait_hook)
2757 ecs->ptid =
2758 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2759 else
2760 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2761
2762 if (debug_infrun)
2763 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2764
2765 if (non_stop
2766 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2767 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2768 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2769 /* In non-stop mode, each thread is handled individually. Switch
2770 early, so the global state is set correctly for this
2771 thread. */
2772 context_switch (ecs->ptid);
2773
2774 /* If an error happens while handling the event, propagate GDB's
2775 knowledge of the executing state to the frontend/user running
2776 state. */
2777 if (!non_stop)
2778 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2779 else
2780 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2781
2782 /* Now figure out what to do with the result. */
2783 handle_inferior_event (ecs);
2784
2785 if (!ecs->wait_some_more)
2786 {
2787 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2788
2789 delete_step_thread_step_resume_breakpoint ();
2790
2791 /* We may not find an inferior if this was a process exit. */
2792 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2793 normal_stop ();
2794
2795 if (target_has_execution
2796 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2797 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2798 && ecs->event_thread->step_multi
2799 && ecs->event_thread->control.stop_step)
2800 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2801 else
2802 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2803 }
2804
2805 /* No error, don't finish the thread states yet. */
2806 discard_cleanups (ts_old_chain);
2807
2808 /* Revert thread and frame. */
2809 do_cleanups (old_chain);
2810
2811 /* If the inferior was in sync execution mode, and now isn't,
2812 restore the prompt. */
2813 if (was_sync && !sync_execution)
2814 display_gdb_prompt (0);
2815 }
2816
2817 /* Record the frame and location we're currently stepping through. */
2818 void
2819 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2820 {
2821 struct thread_info *tp = inferior_thread ();
2822
2823 tp->control.step_frame_id = get_frame_id (frame);
2824 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2825
2826 tp->current_symtab = sal.symtab;
2827 tp->current_line = sal.line;
2828 }
2829
2830 /* Clear context switchable stepping state. */
2831
2832 void
2833 init_thread_stepping_state (struct thread_info *tss)
2834 {
2835 tss->stepping_over_breakpoint = 0;
2836 tss->step_after_step_resume_breakpoint = 0;
2837 tss->stepping_through_solib_after_catch = 0;
2838 tss->stepping_through_solib_catchpoints = NULL;
2839 }
2840
2841 /* Return the cached copy of the last pid/waitstatus returned by
2842 target_wait()/deprecated_target_wait_hook(). The data is actually
2843 cached by handle_inferior_event(), which gets called immediately
2844 after target_wait()/deprecated_target_wait_hook(). */
2845
2846 void
2847 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2848 {
2849 *ptidp = target_last_wait_ptid;
2850 *status = target_last_waitstatus;
2851 }
2852
2853 void
2854 nullify_last_target_wait_ptid (void)
2855 {
2856 target_last_wait_ptid = minus_one_ptid;
2857 }
2858
2859 /* Switch thread contexts. */
2860
2861 static void
2862 context_switch (ptid_t ptid)
2863 {
2864 if (debug_infrun)
2865 {
2866 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2867 target_pid_to_str (inferior_ptid));
2868 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2869 target_pid_to_str (ptid));
2870 }
2871
2872 switch_to_thread (ptid);
2873 }
2874
2875 static void
2876 adjust_pc_after_break (struct execution_control_state *ecs)
2877 {
2878 struct regcache *regcache;
2879 struct gdbarch *gdbarch;
2880 struct address_space *aspace;
2881 CORE_ADDR breakpoint_pc;
2882
2883 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2884 we aren't, just return.
2885
2886 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2887 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2888 implemented by software breakpoints should be handled through the normal
2889 breakpoint layer.
2890
2891 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2892 different signals (SIGILL or SIGEMT for instance), but it is less
2893 clear where the PC is pointing afterwards. It may not match
2894 gdbarch_decr_pc_after_break. I don't know any specific target that
2895 generates these signals at breakpoints (the code has been in GDB since at
2896 least 1992) so I can not guess how to handle them here.
2897
2898 In earlier versions of GDB, a target with
2899 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2900 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2901 target with both of these set in GDB history, and it seems unlikely to be
2902 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2903
2904 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2905 return;
2906
2907 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2908 return;
2909
2910 /* In reverse execution, when a breakpoint is hit, the instruction
2911 under it has already been de-executed. The reported PC always
2912 points at the breakpoint address, so adjusting it further would
2913 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2914 architecture:
2915
2916 B1 0x08000000 : INSN1
2917 B2 0x08000001 : INSN2
2918 0x08000002 : INSN3
2919 PC -> 0x08000003 : INSN4
2920
2921 Say you're stopped at 0x08000003 as above. Reverse continuing
2922 from that point should hit B2 as below. Reading the PC when the
2923 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2924 been de-executed already.
2925
2926 B1 0x08000000 : INSN1
2927 B2 PC -> 0x08000001 : INSN2
2928 0x08000002 : INSN3
2929 0x08000003 : INSN4
2930
2931 We can't apply the same logic as for forward execution, because
2932 we would wrongly adjust the PC to 0x08000000, since there's a
2933 breakpoint at PC - 1. We'd then report a hit on B1, although
2934 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2935 behaviour. */
2936 if (execution_direction == EXEC_REVERSE)
2937 return;
2938
2939 /* If this target does not decrement the PC after breakpoints, then
2940 we have nothing to do. */
2941 regcache = get_thread_regcache (ecs->ptid);
2942 gdbarch = get_regcache_arch (regcache);
2943 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2944 return;
2945
2946 aspace = get_regcache_aspace (regcache);
2947
2948 /* Find the location where (if we've hit a breakpoint) the
2949 breakpoint would be. */
2950 breakpoint_pc = regcache_read_pc (regcache)
2951 - gdbarch_decr_pc_after_break (gdbarch);
2952
2953 /* Check whether there actually is a software breakpoint inserted at
2954 that location.
2955
2956 If in non-stop mode, a race condition is possible where we've
2957 removed a breakpoint, but stop events for that breakpoint were
2958 already queued and arrive later. To suppress those spurious
2959 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2960 and retire them after a number of stop events are reported. */
2961 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2962 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2963 {
2964 struct cleanup *old_cleanups = NULL;
2965
2966 if (RECORD_IS_USED)
2967 old_cleanups = record_gdb_operation_disable_set ();
2968
2969 /* When using hardware single-step, a SIGTRAP is reported for both
2970 a completed single-step and a software breakpoint. Need to
2971 differentiate between the two, as the latter needs adjusting
2972 but the former does not.
2973
2974 The SIGTRAP can be due to a completed hardware single-step only if
2975 - we didn't insert software single-step breakpoints
2976 - the thread to be examined is still the current thread
2977 - this thread is currently being stepped
2978
2979 If any of these events did not occur, we must have stopped due
2980 to hitting a software breakpoint, and have to back up to the
2981 breakpoint address.
2982
2983 As a special case, we could have hardware single-stepped a
2984 software breakpoint. In this case (prev_pc == breakpoint_pc),
2985 we also need to back up to the breakpoint address. */
2986
2987 if (singlestep_breakpoints_inserted_p
2988 || !ptid_equal (ecs->ptid, inferior_ptid)
2989 || !currently_stepping (ecs->event_thread)
2990 || ecs->event_thread->prev_pc == breakpoint_pc)
2991 regcache_write_pc (regcache, breakpoint_pc);
2992
2993 if (RECORD_IS_USED)
2994 do_cleanups (old_cleanups);
2995 }
2996 }
2997
2998 void
2999 init_infwait_state (void)
3000 {
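/* By default, wait for an event from any ptid (pid -1). */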
3001 waiton_ptid = pid_to_ptid (-1);
3002 infwait_state = infwait_normal_state;
3003 }
3004
3005 void
3006 error_is_running (void)
3007 {
3008 error (_("Cannot execute this command while "
3009 "the selected thread is running."));
3010 }
3011
3012 void
3013 ensure_not_running (void)
3014 {
3015 if (is_running (inferior_ptid))
3016 error_is_running ();
3017 }
3018
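/* Return non-zero if FRAME was stepped into from the frame identified
by STEP_FRAME_ID, i.e. if STEP_FRAME_ID is found among FRAME's callers,
looking no further outward than the first non-inline caller. */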
3019 static int
3020 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3021 {
3022 for (frame = get_prev_frame (frame);
3023 frame != NULL;
3024 frame = get_prev_frame (frame))
3025 {
3026 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3027 return 1;
3028 if (get_frame_type (frame) != INLINE_FRAME)
3029 break;
3030 }
3031
3032 return 0;
3033 }
3034
3035 /* Auxiliary function that handles syscall entry/return events.
3036 It returns 1 if the inferior should keep going (and GDB
3037 should ignore the event), or 0 if the event deserves to be
3038 processed. */
3039
3040 static int
3041 handle_syscall_event (struct execution_control_state *ecs)
3042 {
3043 struct regcache *regcache;
3044 struct gdbarch *gdbarch;
3045 int syscall_number;
3046
3047 if (!ptid_equal (ecs->ptid, inferior_ptid))
3048 context_switch (ecs->ptid);
3049
3050 regcache = get_thread_regcache (ecs->ptid);
3051 gdbarch = get_regcache_arch (regcache);
3052 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3053 stop_pc = regcache_read_pc (regcache);
3054
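/* Record the syscall number in the cached last waitstatus, so that
code querying the last target status sees it. */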
3055 target_last_waitstatus.value.syscall_number = syscall_number;
3056
3057 if (catch_syscall_enabled () > 0
3058 && catching_syscall_number (syscall_number) > 0)
3059 {
3060 if (debug_infrun)
3061 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3062 syscall_number);
3063
3064 ecs->event_thread->control.stop_bpstat
3065 = bpstat_stop_status (get_regcache_aspace (regcache),
3066 stop_pc, ecs->ptid);
3067 ecs->random_signal
3068 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3069
3070 if (!ecs->random_signal)
3071 {
3072 /* Catchpoint hit. */
3073 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3074 return 0;
3075 }
3076 }
3077
3078 /* If no catchpoint triggered for this, then keep going. */
3079 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3080 keep_going (ecs);
3081 return 1;
3082 }
3083
3084 /* Clear the supplied execution_control_state's stop_func_* fields. */
3085
3086 static void
3087 clear_stop_func (struct execution_control_state *ecs)
3088 {
3089 ecs->stop_func_filled_in = 0;
3090 ecs->stop_func_start = 0;
3091 ecs->stop_func_end = 0;
3092 ecs->stop_func_name = NULL;
3093 }
3094
3095 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3096
3097 static void
3098 fill_in_stop_func (struct gdbarch *gdbarch,
3099 struct execution_control_state *ecs)
3100 {
3101 if (!ecs->stop_func_filled_in)
3102 {
3103 /* Don't care about return value; stop_func_start and stop_func_name
3104 will both be 0 if it doesn't work. */
3105 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3106 &ecs->stop_func_start, &ecs->stop_func_end);
3107 ecs->stop_func_start
3108 += gdbarch_deprecated_function_start_offset (gdbarch);
3109
3110 ecs->stop_func_filled_in = 1;
3111 }
3112 }
3113
3114 /* Given an execution control state that has been freshly filled in
3115 by an event from the inferior, figure out what it means and take
3116 appropriate action. */
3117
3118 static void
3119 handle_inferior_event (struct execution_control_state *ecs)
3120 {
3121 struct frame_info *frame;
3122 struct gdbarch *gdbarch;
3123 int stopped_by_watchpoint;
3124 int stepped_after_stopped_by_watchpoint = 0;
3125 struct symtab_and_line stop_pc_sal;
3126 enum stop_kind stop_soon;
3127
3128 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3129 {
3130 /* We had an event in the inferior, but we are not interested in
3131 handling it at this level. The lower layers have already
3132 done what needs to be done, if anything.
3133
3134 One of the possible circumstances for this is when the
3135 inferior produces output for the console. The inferior has
3136 not stopped, and we are ignoring the event. Another possible
3137 circumstance is any event which the lower level knows will be
3138 reported multiple times without an intervening resume. */
3139 if (debug_infrun)
3140 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3141 prepare_to_wait (ecs);
3142 return;
3143 }
3144
3145 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3146 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3147 {
3148 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3149
3150 gdb_assert (inf);
3151 stop_soon = inf->control.stop_soon;
3152 }
3153 else
3154 stop_soon = NO_STOP_QUIETLY;
3155
3156 /* Cache the last pid/waitstatus. */
3157 target_last_wait_ptid = ecs->ptid;
3158 target_last_waitstatus = ecs->ws;
3159
3160 /* Always clear state belonging to the previous time we stopped. */
3161 stop_stack_dummy = STOP_NONE;
3162
3163 /* If it's a new process, add it to the thread database. */
3164
3165 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3166 && !ptid_equal (ecs->ptid, minus_one_ptid)
3167 && !in_thread_list (ecs->ptid));
3168
3169 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3170 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3171 add_thread (ecs->ptid);
3172
3173 ecs->event_thread = find_thread_ptid (ecs->ptid);
3174
3175 /* Dependent on valid ECS->EVENT_THREAD. */
3176 adjust_pc_after_break (ecs);
3177
3178 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3179 reinit_frame_cache ();
3180
3181 breakpoint_retire_moribund ();
3182
3183 /* First, distinguish signals caused by the debugger from signals
3184 that have to do with the program's own actions. Note that
3185 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3186 on the operating system version. Here we detect when a SIGILL or
3187 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3188 something similar for SIGSEGV, since a SIGSEGV will be generated
3189 when we're trying to execute a breakpoint instruction on a
3190 non-executable stack. This happens for call dummy breakpoints
3191 for architectures like SPARC that place call dummies on the
3192 stack. */
3193 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3194 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3195 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3196 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3197 {
3198 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3199
3200 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3201 regcache_read_pc (regcache)))
3202 {
3203 if (debug_infrun)
3204 fprintf_unfiltered (gdb_stdlog,
3205 "infrun: Treating signal as SIGTRAP\n");
3206 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3207 }
3208 }
3209
3210 /* Mark the non-executing threads accordingly. In all-stop, all
3211 threads of all processes are stopped when we get any event
3212 reported. In non-stop mode, only the event thread stops. If
3213 we're handling a process exit in non-stop mode, there's nothing
3214 to do, as threads of the dead process are gone, and threads of
3215 any other process were left running. */
3216 if (!non_stop)
3217 set_executing (minus_one_ptid, 0);
3218 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3219 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3220 set_executing (inferior_ptid, 0);
3221
3222 switch (infwait_state)
3223 {
3224 case infwait_thread_hop_state:
3225 if (debug_infrun)
3226 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3227 break;
3228
3229 case infwait_normal_state:
3230 if (debug_infrun)
3231 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3232 break;
3233
3234 case infwait_step_watch_state:
3235 if (debug_infrun)
3236 fprintf_unfiltered (gdb_stdlog,
3237 "infrun: infwait_step_watch_state\n");
3238
3239 stepped_after_stopped_by_watchpoint = 1;
3240 break;
3241
3242 case infwait_nonstep_watch_state:
3243 if (debug_infrun)
3244 fprintf_unfiltered (gdb_stdlog,
3245 "infrun: infwait_nonstep_watch_state\n");
3246 insert_breakpoints ();
3247
3248 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3249 handle things like signals arriving and other things happening
3250 in combination correctly? */
3251 stepped_after_stopped_by_watchpoint = 1;
3252 break;
3253
3254 default:
3255 internal_error (__FILE__, __LINE__, _("bad switch"));
3256 }
3257
3258 infwait_state = infwait_normal_state;
3259 waiton_ptid = pid_to_ptid (-1);
3260
3261 switch (ecs->ws.kind)
3262 {
3263 case TARGET_WAITKIND_LOADED:
3264 if (debug_infrun)
3265 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3266 /* Ignore gracefully during startup of the inferior, as it might
3267 be the shell which has just loaded some objects; otherwise,
3268 add the symbols for the newly loaded objects. Also ignore at
3269 the beginning of an attach or remote session; we will query
3270 the full list of libraries once the connection is
3271 established. */
3272 if (stop_soon == NO_STOP_QUIETLY)
3273 {
3274 /* Check for any newly added shared libraries if we're
3275 supposed to be adding them automatically. Switch
3276 terminal for any messages produced by
3277 breakpoint_re_set. */
3278 target_terminal_ours_for_output ();
3279 /* NOTE: cagney/2003-11-25: Make certain that the target
3280 stack's section table is kept up-to-date. Architectures,
3281 (e.g., PPC64), use the section table to perform
3282 operations such as address => section name and hence
3283 require the table to contain all sections (including
3284 those found in shared libraries). */
3285 #ifdef SOLIB_ADD
3286 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3287 #else
3288 solib_add (NULL, 0, &current_target, auto_solib_add);
3289 #endif
3290 target_terminal_inferior ();
3291
3292 /* If requested, stop when the dynamic linker notifies
3293 gdb of events. This allows the user to get control
3294 and place breakpoints in initializer routines for
3295 dynamically loaded objects (among other things). */
3296 if (stop_on_solib_events)
3297 {
3298 /* Make sure we print "Stopped due to solib-event" in
3299 normal_stop. */
3300 stop_print_frame = 1;
3301
3302 stop_stepping (ecs);
3303 return;
3304 }
3305
3306 /* NOTE drow/2007-05-11: This might be a good place to check
3307 for "catch load". */
3308 }
3309
3310 /* If we are skipping through a shell, or through shared library
3311 loading that we aren't interested in, resume the program. If
3312 we're running the program normally, also resume. But stop if
3313 we're attaching or setting up a remote connection. */
3314 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3315 {
3316 /* Loading of shared libraries might have changed breakpoint
3317 addresses. Make sure new breakpoints are inserted. */
3318 if (stop_soon == NO_STOP_QUIETLY
3319 && !breakpoints_always_inserted_mode ())
3320 insert_breakpoints ();
3321 resume (0, TARGET_SIGNAL_0);
3322 prepare_to_wait (ecs);
3323 return;
3324 }
3325
3326 break;
3327
3328 case TARGET_WAITKIND_SPURIOUS:
3329 if (debug_infrun)
3330 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3331 resume (0, TARGET_SIGNAL_0);
3332 prepare_to_wait (ecs);
3333 return;
3334
3335 case TARGET_WAITKIND_EXITED:
3336 if (debug_infrun)
3337 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3338 inferior_ptid = ecs->ptid;
3339 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3340 set_current_program_space (current_inferior ()->pspace);
3341 handle_vfork_child_exec_or_exit (0);
3342 target_terminal_ours (); /* Must do this before mourn anyway. */
3343 print_exited_reason (ecs->ws.value.integer);
3344
3345 /* Record the exit code in the convenience variable $_exitcode, so
3346 that the user can inspect this again later. */
3347 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3348 (LONGEST) ecs->ws.value.integer);
3349
3350 /* Also record this in the inferior itself. */
3351 current_inferior ()->has_exit_code = 1;
3352 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3353
3354 gdb_flush (gdb_stdout);
3355 target_mourn_inferior ();
3356 singlestep_breakpoints_inserted_p = 0;
3357 cancel_single_step_breakpoints ();
3358 stop_print_frame = 0;
3359 stop_stepping (ecs);
3360 return;
3361
3362 case TARGET_WAITKIND_SIGNALLED:
3363 if (debug_infrun)
3364 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3365 inferior_ptid = ecs->ptid;
3366 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3367 set_current_program_space (current_inferior ()->pspace);
3368 handle_vfork_child_exec_or_exit (0);
3369 stop_print_frame = 0;
3370 target_terminal_ours (); /* Must do this before mourn anyway. */
3371
3372 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3373 reach here unless the inferior is dead. However, for years
3374 target_kill() was called here, which hints that fatal signals aren't
3375 really fatal on some systems. If that's true, then some changes
3376 may be needed. */
3377 target_mourn_inferior ();
3378
3379 print_signal_exited_reason (ecs->ws.value.sig);
3380 singlestep_breakpoints_inserted_p = 0;
3381 cancel_single_step_breakpoints ();
3382 stop_stepping (ecs);
3383 return;
3384
3385 /* The following are the only cases in which we keep going;
3386 the above cases end in a continue or goto. */
3387 case TARGET_WAITKIND_FORKED:
3388 case TARGET_WAITKIND_VFORKED:
3389 if (debug_infrun)
3390 	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED (or VFORKED)\n");
3391
3392 if (!ptid_equal (ecs->ptid, inferior_ptid))
3393 {
3394 context_switch (ecs->ptid);
3395 reinit_frame_cache ();
3396 }
3397
3398 /* Immediately detach breakpoints from the child before there's
3399 any chance of letting the user delete breakpoints from the
3400 breakpoint lists. If we don't do this early, it's easy to
3401 	 leave leftover traps in the child, viz: "break foo; catch
3402 fork; c; <fork>; del; c; <child calls foo>". We only follow
3403 the fork on the last `continue', and by that time the
3404 breakpoint at "foo" is long gone from the breakpoint table.
3405 If we vforked, then we don't need to unpatch here, since both
3406 parent and child are sharing the same memory pages; we'll
3407 need to unpatch at follow/detach time instead to be certain
3408 that new breakpoints added between catchpoint hit time and
3409 vfork follow are detached. */
3410 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3411 {
3412 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3413
3414 /* This won't actually modify the breakpoint list, but will
3415 physically remove the breakpoints from the child. */
3416 detach_breakpoints (child_pid);
3417 }
3418
3419 if (singlestep_breakpoints_inserted_p)
3420 {
3421 /* Pull the single step breakpoints out of the target. */
3422 remove_single_step_breakpoints ();
3423 singlestep_breakpoints_inserted_p = 0;
3424 }
3425
3426 /* In case the event is caught by a catchpoint, remember that
3427 the event is to be followed at the next resume of the thread,
3428 and not immediately. */
3429 ecs->event_thread->pending_follow = ecs->ws;
3430
3431 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3432
3433 ecs->event_thread->control.stop_bpstat
3434 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3435 stop_pc, ecs->ptid);
3436
3437       /* Note that we're interested in knowing whether the bpstat actually
3438 	 causes a stop, not just whether it may explain the signal.
3439 Software watchpoints, for example, always appear in the
3440 bpstat. */
3441 ecs->random_signal
3442 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3443
3444 /* If no catchpoint triggered for this, then keep going. */
3445 if (ecs->random_signal)
3446 {
3447 ptid_t parent;
3448 ptid_t child;
3449 int should_resume;
3450 int follow_child
3451 = (follow_fork_mode_string == follow_fork_mode_child);
3452
3453 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3454
3455 should_resume = follow_fork ();
3456
3457 parent = ecs->ptid;
3458 child = ecs->ws.value.related_pid;
3459
3460 /* In non-stop mode, also resume the other branch. */
3461 if (non_stop && !detach_fork)
3462 {
3463 if (follow_child)
3464 switch_to_thread (parent);
3465 else
3466 switch_to_thread (child);
3467
3468 ecs->event_thread = inferior_thread ();
3469 ecs->ptid = inferior_ptid;
3470 keep_going (ecs);
3471 }
3472
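	  /* Make the current thread match the branch we decided to
	     follow (follow_fork above may have changed which
	     inferior/thread is current), and refresh ECS before
	     resuming or stopping.  */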
3473 if (follow_child)
3474 switch_to_thread (child);
3475 else
3476 switch_to_thread (parent);
3477
3478 ecs->event_thread = inferior_thread ();
3479 ecs->ptid = inferior_ptid;
3480
3481 if (should_resume)
3482 keep_going (ecs);
3483 else
3484 stop_stepping (ecs);
3485 return;
3486 }
3487 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3488 goto process_event_stop_test;
3489
3490 case TARGET_WAITKIND_VFORK_DONE:
3491 /* Done with the shared memory region. Re-insert breakpoints in
3492 the parent, and keep going. */
3493
3494 if (debug_infrun)
3495 fprintf_unfiltered (gdb_stdlog,
3496 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3497
3498 if (!ptid_equal (ecs->ptid, inferior_ptid))
3499 context_switch (ecs->ptid);
3500
3501 current_inferior ()->waiting_for_vfork_done = 0;
3502 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3503 /* This also takes care of reinserting breakpoints in the
3504 previously locked inferior. */
3505 keep_going (ecs);
3506 return;
3507
3508 case TARGET_WAITKIND_EXECD:
3509 if (debug_infrun)
3510 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3511
3512 if (!ptid_equal (ecs->ptid, inferior_ptid))
3513 {
3514 context_switch (ecs->ptid);
3515 reinit_frame_cache ();
3516 }
3517
3518 singlestep_breakpoints_inserted_p = 0;
3519 cancel_single_step_breakpoints ();
3520
3521 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3522
3523 /* Do whatever is necessary to the parent branch of the vfork. */
3524 handle_vfork_child_exec_or_exit (1);
3525
3526 /* This causes the eventpoints and symbol table to be reset.
3527 Must do this now, before trying to determine whether to
3528 stop. */
3529 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3530
3531 ecs->event_thread->control.stop_bpstat
3532 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3533 stop_pc, ecs->ptid);
3534 ecs->random_signal
3535 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3536
3537 /* Note that this may be referenced from inside
3538 bpstat_stop_status above, through inferior_has_execd. */
3539 xfree (ecs->ws.value.execd_pathname);
3540 ecs->ws.value.execd_pathname = NULL;
3541
3542 /* If no catchpoint triggered for this, then keep going. */
3543 if (ecs->random_signal)
3544 {
3545 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3546 keep_going (ecs);
3547 return;
3548 }
3549 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3550 goto process_event_stop_test;
3551
3552 /* Be careful not to try to gather much state about a thread
3553 that's in a syscall. It's frequently a losing proposition. */
3554 case TARGET_WAITKIND_SYSCALL_ENTRY:
3555 if (debug_infrun)
3556 fprintf_unfiltered (gdb_stdlog,
3557 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3558       /* Get the current syscall number.  */
3559 if (handle_syscall_event (ecs) != 0)
3560 return;
3561 goto process_event_stop_test;
3562
3563 /* Before examining the threads further, step this thread to
3564 get it entirely out of the syscall. (We get notice of the
3565 event when the thread is just on the verge of exiting a
3566 syscall. Stepping one instruction seems to get it back
3567 into user code.) */
3568 case TARGET_WAITKIND_SYSCALL_RETURN:
3569 if (debug_infrun)
3570 fprintf_unfiltered (gdb_stdlog,
3571 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3572 if (handle_syscall_event (ecs) != 0)
3573 return;
3574 goto process_event_stop_test;
3575
3576 case TARGET_WAITKIND_STOPPED:
3577 if (debug_infrun)
3578 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3579 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3580 break;
3581
3582 case TARGET_WAITKIND_NO_HISTORY:
3583 /* Reverse execution: target ran out of history info. */
3584 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3585 print_no_history_reason ();
3586 stop_stepping (ecs);
3587 return;
3588 }
3589
3590 if (ecs->new_thread_event)
3591 {
3592 if (non_stop)
3593 /* Non-stop assumes that the target handles adding new threads
3594 to the thread list. */
3595 internal_error (__FILE__, __LINE__,
3596 "targets should add new threads to the thread "
3597 "list themselves in non-stop mode.");
3598
3599 /* We may want to consider not doing a resume here in order to
3600 give the user a chance to play with the new thread. It might
3601 be good to make that a user-settable option. */
3602
3603 /* At this point, all threads are stopped (happens automatically
3604 in either the OS or the native code). Therefore we need to
3605 continue all threads in order to make progress. */
3606
3607 if (!ptid_equal (ecs->ptid, inferior_ptid))
3608 context_switch (ecs->ptid);
3609 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3610 prepare_to_wait (ecs);
3611 return;
3612 }
3613
3614 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3615 {
3616 /* Do we need to clean up the state of a thread that has
3617 completed a displaced single-step? (Doing so usually affects
3618 the PC, so do it here, before we set stop_pc.) */
3619 displaced_step_fixup (ecs->ptid,
3620 ecs->event_thread->suspend.stop_signal);
3621
3622 /* If we either finished a single-step or hit a breakpoint, but
3623 the user wanted this thread to be stopped, pretend we got a
3624 SIG0 (generic unsignaled stop). */
3625
3626 if (ecs->event_thread->stop_requested
3627 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3628 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3629 }
3630
3631 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3632
3633 if (debug_infrun)
3634 {
3635 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3636 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3637 struct cleanup *old_chain = save_inferior_ptid ();
3638
3639 inferior_ptid = ecs->ptid;
3640
3641 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3642 paddress (gdbarch, stop_pc));
3643 if (target_stopped_by_watchpoint ())
3644 {
3645 CORE_ADDR addr;
3646
3647 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3648
3649 if (target_stopped_data_address (&current_target, &addr))
3650 fprintf_unfiltered (gdb_stdlog,
3651 "infrun: stopped data address = %s\n",
3652 paddress (gdbarch, addr));
3653 else
3654 fprintf_unfiltered (gdb_stdlog,
3655 "infrun: (no data address available)\n");
3656 }
3657
3658 do_cleanups (old_chain);
3659 }
3660
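  /* This is set when a thread hop was needed to move some other thread
     past a software single-step breakpoint.  If the hop has now
     produced its expected SIGTRAP, remove the single-step breakpoints,
     switch back to the thread we were originally stepping, and
     resume.  */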
3661 if (stepping_past_singlestep_breakpoint)
3662 {
3663 gdb_assert (singlestep_breakpoints_inserted_p);
3664 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3665 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3666
3667 stepping_past_singlestep_breakpoint = 0;
3668
3669 /* We've either finished single-stepping past the single-step
3670 breakpoint, or stopped for some other reason. It would be nice if
3671 we could tell, but we can't reliably. */
3672 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3673 {
3674 if (debug_infrun)
3675 fprintf_unfiltered (gdb_stdlog,
3676 "infrun: stepping_past_"
3677 "singlestep_breakpoint\n");
3678 /* Pull the single step breakpoints out of the target. */
3679 remove_single_step_breakpoints ();
3680 singlestep_breakpoints_inserted_p = 0;
3681
3682 ecs->random_signal = 0;
3683 ecs->event_thread->control.trap_expected = 0;
3684
3685 context_switch (saved_singlestep_ptid);
3686 if (deprecated_context_hook)
3687 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3688
3689 resume (1, TARGET_SIGNAL_0);
3690 prepare_to_wait (ecs);
3691 return;
3692 }
3693 }
3694
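  /* DEFERRED_STEP_PTID, when set, names a thread whose step was put on
     hold so that some other thread could be stepped first; once that
     other step produces its SIGTRAP we switch back to the deferred
     thread and resume it.  */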
3695 if (!ptid_equal (deferred_step_ptid, null_ptid))
3696 {
3697 /* In non-stop mode, there's never a deferred_step_ptid set. */
3698 gdb_assert (!non_stop);
3699
3700 /* If we stopped for some other reason than single-stepping, ignore
3701 the fact that we were supposed to switch back. */
3702 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3703 {
3704 if (debug_infrun)
3705 fprintf_unfiltered (gdb_stdlog,
3706 "infrun: handling deferred step\n");
3707
3708 /* Pull the single step breakpoints out of the target. */
3709 if (singlestep_breakpoints_inserted_p)
3710 {
3711 remove_single_step_breakpoints ();
3712 singlestep_breakpoints_inserted_p = 0;
3713 }
3714
3715 ecs->event_thread->control.trap_expected = 0;
3716
3717 /* Note: We do not call context_switch at this point, as the
3718 context is already set up for stepping the original thread. */
3719 switch_to_thread (deferred_step_ptid);
3720 deferred_step_ptid = null_ptid;
3721 /* Suppress spurious "Switching to ..." message. */
3722 previous_inferior_ptid = inferior_ptid;
3723
3724 resume (1, TARGET_SIGNAL_0);
3725 prepare_to_wait (ecs);
3726 return;
3727 }
3728
3729 deferred_step_ptid = null_ptid;
3730 }
3731
3732 /* See if a thread hit a thread-specific breakpoint that was meant for
3733 another thread. If so, then step that thread past the breakpoint,
3734 and continue it. */
3735
3736 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3737 {
3738 int thread_hop_needed = 0;
3739 struct address_space *aspace =
3740 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3741
3742 /* Check if a regular breakpoint has been hit before checking
3743 for a potential single step breakpoint. Otherwise, GDB will
3744 not see this breakpoint hit when stepping onto breakpoints. */
3745 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3746 {
3747 ecs->random_signal = 0;
3748 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3749 thread_hop_needed = 1;
3750 }
3751 else if (singlestep_breakpoints_inserted_p)
3752 {
3753 /* We have not context switched yet, so this should be true
3754 no matter which thread hit the singlestep breakpoint. */
3755 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3756 if (debug_infrun)
3757 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3758 "trap for %s\n",
3759 target_pid_to_str (ecs->ptid));
3760
3761 ecs->random_signal = 0;
3762 /* The call to in_thread_list is necessary because PTIDs sometimes
3763 change when we go from single-threaded to multi-threaded. If
3764 the singlestep_ptid is still in the list, assume that it is
3765 really different from ecs->ptid. */
3766 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3767 && in_thread_list (singlestep_ptid))
3768 {
3769 /* If the PC of the thread we were trying to single-step
3770 has changed, discard this event (which we were going
3771 to ignore anyway), and pretend we saw that thread
3772 	     trap.  This prevents us from continuously moving the
3773 single-step breakpoint forward, one instruction at a
3774 time. If the PC has changed, then the thread we were
3775 trying to single-step has trapped or been signalled,
3776 but the event has not been reported to GDB yet.
3777
3778 There might be some cases where this loses signal
3779 information, if a signal has arrived at exactly the
3780 same time that the PC changed, but this is the best
3781 we can do with the information available. Perhaps we
3782 should arrange to report all events for all threads
3783 when they stop, or to re-poll the remote looking for
3784 this particular thread (i.e. temporarily enable
3785 schedlock). */
3786
3787 CORE_ADDR new_singlestep_pc
3788 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3789
3790 if (new_singlestep_pc != singlestep_pc)
3791 {
3792 enum target_signal stop_signal;
3793
3794 if (debug_infrun)
3795 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3796 " but expected thread advanced also\n");
3797
3798 /* The current context still belongs to
3799 singlestep_ptid. Don't swap here, since that's
3800 the context we want to use. Just fudge our
3801 state and continue. */
3802 stop_signal = ecs->event_thread->suspend.stop_signal;
3803 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3804 ecs->ptid = singlestep_ptid;
3805 ecs->event_thread = find_thread_ptid (ecs->ptid);
3806 ecs->event_thread->suspend.stop_signal = stop_signal;
3807 stop_pc = new_singlestep_pc;
3808 }
3809 else
3810 {
3811 if (debug_infrun)
3812 fprintf_unfiltered (gdb_stdlog,
3813 "infrun: unexpected thread\n");
3814
3815 thread_hop_needed = 1;
3816 stepping_past_singlestep_breakpoint = 1;
3817 saved_singlestep_ptid = singlestep_ptid;
3818 }
3819 }
3820 }
3821
3822 if (thread_hop_needed)
3823 {
3824 struct regcache *thread_regcache;
3825 int remove_status = 0;
3826
3827 if (debug_infrun)
3828 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3829
3830 	  /* Switch context before touching inferior memory; the
3831 previous thread may have exited. */
3832 if (!ptid_equal (inferior_ptid, ecs->ptid))
3833 context_switch (ecs->ptid);
3834
3835 /* Saw a breakpoint, but it was hit by the wrong thread.
3836 Just continue. */
3837
3838 if (singlestep_breakpoints_inserted_p)
3839 {
3840 /* Pull the single step breakpoints out of the target. */
3841 remove_single_step_breakpoints ();
3842 singlestep_breakpoints_inserted_p = 0;
3843 }
3844
3845 /* If the arch can displace step, don't remove the
3846 breakpoints. */
3847 thread_regcache = get_thread_regcache (ecs->ptid);
3848 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3849 remove_status = remove_breakpoints ();
3850
3851 /* Did we fail to remove breakpoints? If so, try
3852 to set the PC past the bp. (There's at least
3853 one situation in which we can fail to remove
3854 the bp's: On HP-UX's that use ttrace, we can't
3855 change the address space of a vforking child
3856 process until the child exits (well, okay, not
3857 	     then either :-) or execs.)  */
3858 if (remove_status != 0)
3859 error (_("Cannot step over breakpoint hit in wrong thread"));
3860 else
3861 { /* Single step */
3862 if (!non_stop)
3863 {
3864 /* Only need to require the next event from this
3865 thread in all-stop mode. */
3866 waiton_ptid = ecs->ptid;
3867 infwait_state = infwait_thread_hop_state;
3868 }
3869
3870 ecs->event_thread->stepping_over_breakpoint = 1;
3871 keep_going (ecs);
3872 return;
3873 }
3874 }
3875 else if (singlestep_breakpoints_inserted_p)
3876 {
3877 ecs->random_signal = 0;
3878 }
3879 }
3880 else
3881 ecs->random_signal = 1;
3882
3883 /* See if something interesting happened to the non-current thread. If
3884 so, then switch to that thread. */
3885 if (!ptid_equal (ecs->ptid, inferior_ptid))
3886 {
3887 if (debug_infrun)
3888 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3889
3890 context_switch (ecs->ptid);
3891
3892 if (deprecated_context_hook)
3893 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3894 }
3895
3896 /* At this point, get hold of the now-current thread's frame. */
3897 frame = get_current_frame ();
3898 gdbarch = get_frame_arch (frame);
3899
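  /* The software single-step breakpoints (if any) have done their job
     for this event; take them out of the target before analyzing the
     stop any further.  */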
3900 if (singlestep_breakpoints_inserted_p)
3901 {
3902 /* Pull the single step breakpoints out of the target. */
3903 remove_single_step_breakpoints ();
3904 singlestep_breakpoints_inserted_p = 0;
3905 }
3906
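  /* Work out whether this stop involved a watchpoint.  If we just
     finished single-stepping the instruction that triggered a
     watchpoint (see the infwait_*_watch_state handling above), don't
     treat this event as another watchpoint trigger; otherwise ask the
     target which watchpoints, if any, triggered.  */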
3907 if (stepped_after_stopped_by_watchpoint)
3908 stopped_by_watchpoint = 0;
3909 else
3910 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3911
3912 /* If necessary, step over this watchpoint. We'll be back to display
3913 it in a moment. */
3914 if (stopped_by_watchpoint
3915 && (target_have_steppable_watchpoint
3916 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3917 {
3918 /* At this point, we are stopped at an instruction which has
3919 attempted to write to a piece of memory under control of
3920 a watchpoint. The instruction hasn't actually executed
3921 yet. If we were to evaluate the watchpoint expression
3922 now, we would get the old value, and therefore no change
3923 would seem to have occurred.
3924
3925 In order to make watchpoints work `right', we really need
3926 to complete the memory write, and then evaluate the
3927 watchpoint expression. We do this by single-stepping the
3928 target.
3929
3930 	 It may not be necessary to disable the watchpoint to step over
3931 it. For example, the PA can (with some kernel cooperation)
3932 single step over a watchpoint without disabling the watchpoint.
3933
3934 It is far more common to need to disable a watchpoint to step
3935 the inferior over it. If we have non-steppable watchpoints,
3936 we must disable the current watchpoint; it's simplest to
3937 disable all watchpoints and breakpoints. */
3938 int hw_step = 1;
3939
3940 if (!target_have_steppable_watchpoint)
3941 {
3942 remove_breakpoints ();
3943 /* See comment in resume why we need to stop bypassing signals
3944 while breakpoints have been removed. */
3945 target_pass_signals (0, NULL);
3946 }
3947 /* Single step */
3948 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3949 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3950 waiton_ptid = ecs->ptid;
3951 if (target_have_steppable_watchpoint)
3952 infwait_state = infwait_step_watch_state;
3953 else
3954 infwait_state = infwait_nonstep_watch_state;
3955 prepare_to_wait (ecs);
3956 return;
3957 }
3958
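  /* Reset the per-stop state before deciding why the target stopped
     and what to do about it.  */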
3959 clear_stop_func (ecs);
3960 ecs->event_thread->stepping_over_breakpoint = 0;
3961 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
3962 ecs->event_thread->control.stop_step = 0;
3963 stop_print_frame = 1;
3964 ecs->random_signal = 0;
3965 stopped_by_random_signal = 0;
3966
3967 /* Hide inlined functions starting here, unless we just performed stepi or
3968 nexti. After stepi and nexti, always show the innermost frame (not any
3969 inline function call sites). */
3970 if (ecs->event_thread->control.step_range_end != 1)
3971 skip_inline_frames (ecs->ptid);
3972
3973 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3974 && ecs->event_thread->control.trap_expected
3975 && gdbarch_single_step_through_delay_p (gdbarch)
3976 && currently_stepping (ecs->event_thread))
3977 {
3978 /* We're trying to step off a breakpoint. Turns out that we're
3979 also on an instruction that needs to be stepped multiple
3980 	 times before it has been fully executed.  E.g., architectures
3981 with a delay slot. It needs to be stepped twice, once for
3982 the instruction and once for the delay slot. */
3983 int step_through_delay
3984 = gdbarch_single_step_through_delay (gdbarch, frame);
3985
3986 if (debug_infrun && step_through_delay)
3987 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3988 if (ecs->event_thread->control.step_range_end == 0
3989 && step_through_delay)
3990 {
3991 /* The user issued a continue when stopped at a breakpoint.
3992 Set up for another trap and get out of here. */
3993 ecs->event_thread->stepping_over_breakpoint = 1;
3994 keep_going (ecs);
3995 return;
3996 }
3997 else if (step_through_delay)
3998 {
3999 /* The user issued a step when stopped at a breakpoint.
4000 Maybe we should stop, maybe we should not - the delay
4001 slot *might* correspond to a line of source. In any
4002 case, don't decide that here, just set
4003 ecs->stepping_over_breakpoint, making sure we
4004 single-step again before breakpoints are re-inserted. */
4005 ecs->event_thread->stepping_over_breakpoint = 1;
4006 }
4007 }
4008
4009 /* Look at the cause of the stop, and decide what to do.
4010 The alternatives are:
4011 1) stop_stepping and return; to really stop and return to the debugger,
4012 2) keep_going and return to start up again
4013 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4014 3) set ecs->random_signal to 1, and the decision between 1 and 2
4015 will be made according to the signal handling tables. */
4016
4017 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4018 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4019 || stop_soon == STOP_QUIETLY_REMOTE)
4020 {
4021 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4022 && stop_after_trap)
4023 {
4024 if (debug_infrun)
4025 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4026 stop_print_frame = 0;
4027 stop_stepping (ecs);
4028 return;
4029 }
4030
4031       /* This originates from start_remote(), start_inferior() and
4032 shared libraries hook functions. */
4033 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4034 {
4035 if (debug_infrun)
4036 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4037 stop_stepping (ecs);
4038 return;
4039 }
4040
4041 /* This originates from attach_command(). We need to overwrite
4042 the stop_signal here, because some kernels don't ignore a
4043 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4044 See more comments in inferior.h. On the other hand, if we
4045 get a non-SIGSTOP, report it to the user - assume the backend
4046 will handle the SIGSTOP if it should show up later.
4047
4048 Also consider that the attach is complete when we see a
4049 	 SIGTRAP.  Some systems (e.g. Windows) and stubs supporting
4050 	 target extended-remote (e.g. gdbserver) report it instead
4051 	 of a SIGSTOP.  We already rely on SIGTRAP being our
4052 signal, so this is no exception.
4053
4054 Also consider that the attach is complete when we see a
4055 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4056 the target to stop all threads of the inferior, in case the
4057 low level attach operation doesn't stop them implicitly. If
4058 they weren't stopped implicitly, then the stub will report a
4059 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4060 other than GDB's request. */
4061 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4062 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4063 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4064 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4065 {
4066 stop_stepping (ecs);
4067 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4068 return;
4069 }
4070
4071 /* See if there is a breakpoint at the current PC. */
4072 ecs->event_thread->control.stop_bpstat
4073 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4074 stop_pc, ecs->ptid);
4075
4076       /* The following is needed in case a breakpoint condition
4077 	 called a function.  */
4078 stop_print_frame = 1;
4079
4080 /* This is where we handle "moribund" watchpoints. Unlike
4081 	 software breakpoint traps, hardware watchpoint traps are
4082 always distinguishable from random traps. If no high-level
4083 watchpoint is associated with the reported stop data address
4084 anymore, then the bpstat does not explain the signal ---
4085 simply make sure to ignore it if `stopped_by_watchpoint' is
4086 set. */
4087
4088 if (debug_infrun
4089 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4090 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4091 && stopped_by_watchpoint)
4092 fprintf_unfiltered (gdb_stdlog,
4093 "infrun: no user watchpoint explains "
4094 "watchpoint SIGTRAP, ignoring\n");
4095
4096 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4097 at one stage in the past included checks for an inferior
4098 function call's call dummy's return breakpoint. The original
4099 comment, that went with the test, read:
4100
4101 ``End of a stack dummy. Some systems (e.g. Sony news) give
4102 another signal besides SIGTRAP, so check here as well as
4103 above.''
4104
4105 	 If someone ever tries to get call dummies on a
4106 non-executable stack to work (where the target would stop
4107 with something like a SIGSEGV), then those tests might need
4108 to be re-instated. Given, however, that the tests were only
4109 enabled when momentary breakpoints were not being used, I
4110 suspect that it won't be the case.
4111
4112 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4113 be necessary for call dummies on a non-executable stack on
4114 SPARC. */
4115
4116 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4117 ecs->random_signal
4118 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4119 || stopped_by_watchpoint
4120 || ecs->event_thread->control.trap_expected
4121 || (ecs->event_thread->control.step_range_end
4122 && (ecs->event_thread->control.step_resume_breakpoint
4123 == NULL)));
4124 else
4125 {
4126 ecs->random_signal = !bpstat_explains_signal
4127 (ecs->event_thread->control.stop_bpstat);
4128 if (!ecs->random_signal)
4129 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4130 }
4131 }
4132
4133 /* When we reach this point, we've pretty much decided
4134 that the reason for stopping must've been a random
4135 (unexpected) signal. */
4136
4137 else
4138 ecs->random_signal = 1;
4139
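  /* The fork/vfork, exec and syscall cases above jump here to run the
     common breakpoint and stepping tests.  */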
4140 process_event_stop_test:
4141
4142 /* Re-fetch current thread's frame in case we did a
4143 "goto process_event_stop_test" above. */
4144 frame = get_current_frame ();
4145 gdbarch = get_frame_arch (frame);
4146
4147 /* For the program's own signals, act according to
4148 the signal handling tables. */
4149
4150 if (ecs->random_signal)
4151 {
4152 /* Signal not for debugging purposes. */
4153 int printed = 0;
4154 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4155
4156 if (debug_infrun)
4157 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4158 ecs->event_thread->suspend.stop_signal);
4159
4160 stopped_by_random_signal = 1;
4161
4162 if (signal_print[ecs->event_thread->suspend.stop_signal])
4163 {
4164 printed = 1;
4165 target_terminal_ours_for_output ();
4166 print_signal_received_reason
4167 (ecs->event_thread->suspend.stop_signal);
4168 }
4169 /* Always stop on signals if we're either just gaining control
4170 of the program, or the user explicitly requested this thread
4171 to remain stopped. */
4172 if (stop_soon != NO_STOP_QUIETLY
4173 || ecs->event_thread->stop_requested
4174 || (!inf->detaching
4175 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4176 {
4177 stop_stepping (ecs);
4178 return;
4179 }
4180 /* If not going to stop, give terminal back
4181 if we took it away. */
4182 else if (printed)
4183 target_terminal_inferior ();
4184
4185 /* Clear the signal if it should not be passed. */
4186 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4187 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4188
4189 if (ecs->event_thread->prev_pc == stop_pc
4190 && ecs->event_thread->control.trap_expected
4191 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4192 {
4193 /* We were just starting a new sequence, attempting to
4194 single-step off of a breakpoint and expecting a SIGTRAP.
4195 Instead this signal arrives. This signal will take us out
4196 	     of the stepping range, so GDB needs to remember to resume
4197 	     stepping off that breakpoint when the signal handler
4198 	     returns.  */
4199 /* To simplify things, "continue" is forced to use the same
4200 code paths as single-step - set a breakpoint at the
4201 signal return address and then, once hit, step off that
4202 breakpoint. */
4203 if (debug_infrun)
4204 fprintf_unfiltered (gdb_stdlog,
4205 "infrun: signal arrived while stepping over "
4206 "breakpoint\n");
4207
4208 insert_hp_step_resume_breakpoint_at_frame (frame);
4209 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4210 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4211 ecs->event_thread->control.trap_expected = 0;
4212 keep_going (ecs);
4213 return;
4214 }
4215
4216 if (ecs->event_thread->control.step_range_end != 0
4217 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4218 && (ecs->event_thread->control.step_range_start <= stop_pc
4219 && stop_pc < ecs->event_thread->control.step_range_end)
4220 && frame_id_eq (get_stack_frame_id (frame),
4221 ecs->event_thread->control.step_stack_frame_id)
4222 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4223 {
4224 /* The inferior is about to take a signal that will take it
4225 out of the single step range. Set a breakpoint at the
4226 current PC (which is presumably where the signal handler
4227 will eventually return) and then allow the inferior to
4228 run free.
4229
4230 Note that this is only needed for a signal delivered
4231 while in the single-step range. Nested signals aren't a
4232 problem as they eventually all return. */
4233 if (debug_infrun)
4234 fprintf_unfiltered (gdb_stdlog,
4235 "infrun: signal may take us out of "
4236 "single-step range\n");
4237
4238 insert_hp_step_resume_breakpoint_at_frame (frame);
4239 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4240 ecs->event_thread->control.trap_expected = 0;
4241 keep_going (ecs);
4242 return;
4243 }
4244
4245       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4246 when either there's a nested signal, or when there's a
4247 pending signal enabled just as the signal handler returns
4248 (leaving the inferior at the step-resume-breakpoint without
4249 actually executing it). Either way continue until the
4250 breakpoint is really hit. */
4251 keep_going (ecs);
4252 return;
4253 }
4254
4255 /* Handle cases caused by hitting a breakpoint. */
4256 {
4257 CORE_ADDR jmp_buf_pc;
4258 struct bpstat_what what;
4259
4260 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4261
4262 if (what.call_dummy)
4263 {
4264 stop_stack_dummy = what.call_dummy;
4265 }
4266
4267 /* If we hit an internal event that triggers symbol changes, the
4268 current frame will be invalidated within bpstat_what (e.g., if
4269 we hit an internal solib event). Re-fetch it. */
4270 frame = get_current_frame ();
4271 gdbarch = get_frame_arch (frame);
4272
4273 switch (what.main_action)
4274 {
4275 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4276 /* If we hit the breakpoint at longjmp while stepping, we
4277 install a momentary breakpoint at the target of the
4278 jmp_buf. */
4279
4280 if (debug_infrun)
4281 fprintf_unfiltered (gdb_stdlog,
4282 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4283
4284 ecs->event_thread->stepping_over_breakpoint = 1;
4285
4286 if (what.is_longjmp)
4287 {
4288 if (!gdbarch_get_longjmp_target_p (gdbarch)
4289 || !gdbarch_get_longjmp_target (gdbarch,
4290 frame, &jmp_buf_pc))
4291 {
4292 if (debug_infrun)
4293 fprintf_unfiltered (gdb_stdlog,
4294 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4295 "(!gdbarch_get_longjmp_target)\n");
4296 keep_going (ecs);
4297 return;
4298 }
4299
4300 /* We're going to replace the current step-resume breakpoint
4301 with a longjmp-resume breakpoint. */
4302 delete_step_resume_breakpoint (ecs->event_thread);
4303
4304 /* Insert a breakpoint at resume address. */
4305 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4306 }
4307 else
4308 {
4309 struct symbol *func = get_frame_function (frame);
4310
4311 if (func)
4312 check_exception_resume (ecs, frame, func);
4313 }
4314 keep_going (ecs);
4315 return;
4316
4317 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4318 if (debug_infrun)
4319 fprintf_unfiltered (gdb_stdlog,
4320 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4321
4322 if (what.is_longjmp)
4323 {
4324 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4325 != NULL);
4326 delete_step_resume_breakpoint (ecs->event_thread);
4327 }
4328 else
4329 {
4330 /* There are several cases to consider.
4331
4332 1. The initiating frame no longer exists. In this case
4333 we must stop, because the exception has gone too far.
4334
4335 2. The initiating frame exists, and is the same as the
4336 current frame. We stop, because the exception has been
4337 caught.
4338
4339 3. The initiating frame exists and is different from
4340 the current frame. This means the exception has been
4341 caught beneath the initiating frame, so keep going. */
4342 struct frame_info *init_frame
4343 = frame_find_by_id (ecs->event_thread->initiating_frame);
4344
4345 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4346 != NULL);
4347 delete_exception_resume_breakpoint (ecs->event_thread);
4348
4349 if (init_frame)
4350 {
4351 struct frame_id current_id
4352 = get_frame_id (get_current_frame ());
4353 if (frame_id_eq (current_id,
4354 ecs->event_thread->initiating_frame))
4355 {
4356 /* Case 2. Fall through. */
4357 }
4358 else
4359 {
4360 /* Case 3. */
4361 keep_going (ecs);
4362 return;
4363 }
4364 }
4365
4366 /* For Cases 1 and 2, remove the step-resume breakpoint,
4367 if it exists. */
4368 delete_step_resume_breakpoint (ecs->event_thread);
4369 }
4370
4371 ecs->event_thread->control.stop_step = 1;
4372 print_end_stepping_range_reason ();
4373 stop_stepping (ecs);
4374 return;
4375
4376 case BPSTAT_WHAT_SINGLE:
4377 if (debug_infrun)
4378 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4379 ecs->event_thread->stepping_over_breakpoint = 1;
4380 /* Still need to check other stuff, at least the case
4381 where we are stepping and step out of the right range. */
4382 break;
4383
4384 case BPSTAT_WHAT_STEP_RESUME:
4385 if (debug_infrun)
4386 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4387
4388 delete_step_resume_breakpoint (ecs->event_thread);
4389 if (ecs->event_thread->control.proceed_to_finish
4390 && execution_direction == EXEC_REVERSE)
4391 {
4392 struct thread_info *tp = ecs->event_thread;
4393
4394 /* We are finishing a function in reverse, and just hit
4395 the step-resume breakpoint at the start address of the
4396 function, and we're almost there -- just need to back
4397 up by one more single-step, which should take us back
4398 to the function call. */
4399 tp->control.step_range_start = tp->control.step_range_end = 1;
4400 keep_going (ecs);
4401 return;
4402 }
4403 fill_in_stop_func (gdbarch, ecs);
4404 if (stop_pc == ecs->stop_func_start
4405 && execution_direction == EXEC_REVERSE)
4406 {
4407 /* We are stepping over a function call in reverse, and
4408 just hit the step-resume breakpoint at the start
4409 address of the function. Go back to single-stepping,
4410 which should take us back to the function call. */
4411 ecs->event_thread->stepping_over_breakpoint = 1;
4412 keep_going (ecs);
4413 return;
4414 }
4415 break;
4416
4417 case BPSTAT_WHAT_STOP_NOISY:
4418 if (debug_infrun)
4419 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4420 stop_print_frame = 1;
4421
4422       /* We are about to nuke the step_resume_breakpoint via the
4423 cleanup chain, so no need to worry about it here. */
4424
4425 stop_stepping (ecs);
4426 return;
4427
4428 case BPSTAT_WHAT_STOP_SILENT:
4429 if (debug_infrun)
4430 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4431 stop_print_frame = 0;
4432
4433       /* We are about to nuke the step_resume_breakpoint via the
4434 cleanup chain, so no need to worry about it here. */
4435
4436 stop_stepping (ecs);
4437 return;
4438
4439 case BPSTAT_WHAT_HP_STEP_RESUME:
4440 if (debug_infrun)
4441 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4442
4443 delete_step_resume_breakpoint (ecs->event_thread);
4444 if (ecs->event_thread->step_after_step_resume_breakpoint)
4445 {
4446 /* Back when the step-resume breakpoint was inserted, we
4447 were trying to single-step off a breakpoint. Go back
4448 to doing that. */
4449 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4450 ecs->event_thread->stepping_over_breakpoint = 1;
4451 keep_going (ecs);
4452 return;
4453 }
4454 break;
4455
4456 case BPSTAT_WHAT_KEEP_CHECKING:
4457 break;
4458 }
4459 }
4460
4461 /* We come here if we hit a breakpoint but should not
4462 stop for it. Possibly we also were stepping
4463 and should stop for that. So fall through and
4464 test for stepping. But, if not stepping,
4465 do not stop. */
4466
4467 /* In all-stop mode, if we're currently stepping but have stopped in
4468 some other thread, we need to switch back to the stepped thread. */
4469 if (!non_stop)
4470 {
4471 struct thread_info *tp;
4472
4473 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4474 ecs->event_thread);
4475 if (tp)
4476 {
4477 /* However, if the current thread is blocked on some internal
4478 breakpoint, and we simply need to step over that breakpoint
4479 to get it going again, do that first. */
4480 if ((ecs->event_thread->control.trap_expected
4481 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4482 || ecs->event_thread->stepping_over_breakpoint)
4483 {
4484 keep_going (ecs);
4485 return;
4486 }
4487
4488 /* If the stepping thread exited, then don't try to switch
4489 back and resume it, which could fail in several different
4490 ways depending on the target. Instead, just keep going.
4491
4492 We can find a stepping dead thread in the thread list in
4493 two cases:
4494
4495 - The target supports thread exit events, and when the
4496 target tries to delete the thread from the thread list,
4497 inferior_ptid pointed at the exiting thread. In such
4498 case, calling delete_thread does not really remove the
4499 thread from the list; instead, the thread is left listed,
4500 with 'exited' state.
4501
4502 - The target's debug interface does not support thread
4503 exit events, and so we have no idea whatsoever if the
4504 previously stepping thread is still alive. For that
4505 reason, we need to synchronously query the target
4506 now. */
4507 if (is_exited (tp->ptid)
4508 || !target_thread_alive (tp->ptid))
4509 {
4510 if (debug_infrun)
4511 fprintf_unfiltered (gdb_stdlog,
4512 "infrun: not switching back to "
4513 "stepped thread, it has vanished\n");
4514
4515 delete_thread (tp->ptid);
4516 keep_going (ecs);
4517 return;
4518 }
4519
4520 /* Otherwise, we no longer expect a trap in the current thread.
4521 Clear the trap_expected flag before switching back -- this is
4522 what keep_going would do as well, if we called it. */
4523 ecs->event_thread->control.trap_expected = 0;
4524
4525 if (debug_infrun)
4526 fprintf_unfiltered (gdb_stdlog,
4527 "infrun: switching back to stepped thread\n");
4528
4529 ecs->event_thread = tp;
4530 ecs->ptid = tp->ptid;
4531 context_switch (ecs->ptid);
4532 keep_going (ecs);
4533 return;
4534 }
4535 }
4536
4537 /* Are we stepping to get the inferior out of the dynamic linker's
4538 hook (and possibly the dld itself) after catching a shlib
4539 event? */
4540 if (ecs->event_thread->stepping_through_solib_after_catch)
4541 {
4542 #if defined(SOLIB_ADD)
4543 /* Have we reached our destination? If not, keep going. */
4544 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4545 {
4546 if (debug_infrun)
4547 fprintf_unfiltered (gdb_stdlog,
4548 "infrun: stepping in dynamic linker\n");
4549 ecs->event_thread->stepping_over_breakpoint = 1;
4550 keep_going (ecs);
4551 return;
4552 }
4553 #endif
4554 if (debug_infrun)
4555 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4556 /* Else, stop and report the catchpoint(s) whose triggering
4557 caused us to begin stepping. */
4558 ecs->event_thread->stepping_through_solib_after_catch = 0;
4559 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4560 ecs->event_thread->control.stop_bpstat
4561 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4562 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4563 stop_print_frame = 1;
4564 stop_stepping (ecs);
4565 return;
4566 }
4567
4568 if (ecs->event_thread->control.step_resume_breakpoint)
4569 {
4570 if (debug_infrun)
4571 fprintf_unfiltered (gdb_stdlog,
4572 "infrun: step-resume breakpoint is inserted\n");
4573
4574 /* Having a step-resume breakpoint overrides anything
4575 else having to do with stepping commands until
4576 that breakpoint is reached. */
4577 keep_going (ecs);
4578 return;
4579 }
4580
4581 if (ecs->event_thread->control.step_range_end == 0)
4582 {
4583 if (debug_infrun)
4584 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4585 /* Likewise if we aren't even stepping. */
4586 keep_going (ecs);
4587 return;
4588 }
4589
4590 /* Re-fetch current thread's frame in case the code above caused
4591 the frame cache to be re-initialized, making our FRAME variable
4592 a dangling pointer. */
4593 frame = get_current_frame ();
4594 gdbarch = get_frame_arch (frame);
4595 fill_in_stop_func (gdbarch, ecs);
4596
4597 /* If stepping through a line, keep going if still within it.
4598
4599 Note that step_range_end is the address of the first instruction
4600 beyond the step range, and NOT the address of the last instruction
4601 within it!
4602
4603 Note also that during reverse execution, we may be stepping
4604 through a function epilogue and therefore must detect when
4605 the current-frame changes in the middle of a line. */
4606
4607 if (stop_pc >= ecs->event_thread->control.step_range_start
4608 && stop_pc < ecs->event_thread->control.step_range_end
4609 && (execution_direction != EXEC_REVERSE
4610 || frame_id_eq (get_frame_id (frame),
4611 ecs->event_thread->control.step_frame_id)))
4612 {
4613 if (debug_infrun)
4614 fprintf_unfiltered
4615 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4616 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4617 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4618
4619 /* When stepping backward, stop at beginning of line range
4620 (unless it's the function entry point, in which case
4621 keep going back to the call point). */
4622 if (stop_pc == ecs->event_thread->control.step_range_start
4623 && stop_pc != ecs->stop_func_start
4624 && execution_direction == EXEC_REVERSE)
4625 {
4626 ecs->event_thread->control.stop_step = 1;
4627 print_end_stepping_range_reason ();
4628 stop_stepping (ecs);
4629 }
4630 else
4631 keep_going (ecs);
4632
4633 return;
4634 }
4635
4636 /* We stepped out of the stepping range. */
4637
4638 /* If we are stepping at the source level and entered the runtime
4639 loader dynamic symbol resolution code...
4640
4641 EXEC_FORWARD: we keep on single stepping until we exit the run
4642 time loader code and reach the callee's address.
4643
4644 EXEC_REVERSE: we've already executed the callee (backward), and
4645 the runtime loader code is handled just like any other
4646 undebuggable function call. Now we need only keep stepping
4647 backward through the trampoline code, and that's handled further
4648 down, so there is nothing for us to do here. */
4649
4650 if (execution_direction != EXEC_REVERSE
4651 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4652 && in_solib_dynsym_resolve_code (stop_pc))
4653 {
4654 CORE_ADDR pc_after_resolver =
4655 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4656
4657 if (debug_infrun)
4658 fprintf_unfiltered (gdb_stdlog,
4659 "infrun: stepped into dynsym resolve code\n");
4660
4661 if (pc_after_resolver)
4662 {
4663 /* Set up a step-resume breakpoint at the address
4664 indicated by SKIP_SOLIB_RESOLVER. */
4665 struct symtab_and_line sr_sal;
4666
4667 init_sal (&sr_sal);
4668 sr_sal.pc = pc_after_resolver;
4669 sr_sal.pspace = get_frame_program_space (frame);
4670
4671 insert_step_resume_breakpoint_at_sal (gdbarch,
4672 sr_sal, null_frame_id);
4673 }
4674
4675 keep_going (ecs);
4676 return;
4677 }
4678
4679 if (ecs->event_thread->control.step_range_end != 1
4680 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4681 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4682 && get_frame_type (frame) == SIGTRAMP_FRAME)
4683 {
4684 if (debug_infrun)
4685 fprintf_unfiltered (gdb_stdlog,
4686 "infrun: stepped into signal trampoline\n");
4687 /* The inferior, while doing a "step" or "next", has ended up in
4688 a signal trampoline (either by a signal being delivered or by
4689 the signal handler returning). Just single-step until the
4690 inferior leaves the trampoline (either by calling the handler
4691 or returning). */
4692 keep_going (ecs);
4693 return;
4694 }
4695
4696 /* Check for subroutine calls. The check for the current frame
4697 equalling the step ID is not necessary - the check of the
4698 previous frame's ID is sufficient - but it is a common case and
4699 cheaper than checking the previous frame's ID.
4700
4701 NOTE: frame_id_eq will never report two invalid frame IDs as
4702 being equal, so to get into this block, both the current and
4703 previous frame must have valid frame IDs. */
4704 /* The outer_frame_id check is a heuristic to detect stepping
4705 through startup code. If we step over an instruction which
4706 sets the stack pointer from an invalid value to a valid value,
4707 we may detect that as a subroutine call from the mythical
4708 "outermost" function. This could be fixed by marking
4709 outermost frames as !stack_p,code_p,special_p. Then the
4710 initial outermost frame, before sp was valid, would
4711 have code_addr == &_start. See the comment in frame_id_eq
4712 for more. */
4713 if (!frame_id_eq (get_stack_frame_id (frame),
4714 ecs->event_thread->control.step_stack_frame_id)
4715 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4716 ecs->event_thread->control.step_stack_frame_id)
4717 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4718 outer_frame_id)
4719 || step_start_function != find_pc_function (stop_pc))))
4720 {
4721 CORE_ADDR real_stop_pc;
4722
4723 if (debug_infrun)
4724 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4725
4726 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4727 || ((ecs->event_thread->control.step_range_end == 1)
4728 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4729 ecs->stop_func_start)))
4730 {
4731 /* I presume that step_over_calls is only 0 when we're
4732 supposed to be stepping at the assembly language level
4733 ("stepi"). Just stop. */
4734 	  /* Also, maybe we just did a "nexti" inside a prologue, so we
4735 thought it was a subroutine call but it was not. Stop as
4736 well. FENN */
4737 /* And this works the same backward as frontward. MVS */
4738 ecs->event_thread->control.stop_step = 1;
4739 print_end_stepping_range_reason ();
4740 stop_stepping (ecs);
4741 return;
4742 }
4743
4744 /* Reverse stepping through solib trampolines. */
4745
4746 if (execution_direction == EXEC_REVERSE
4747 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4748 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4749 || (ecs->stop_func_start == 0
4750 && in_solib_dynsym_resolve_code (stop_pc))))
4751 {
4752 /* Any solib trampoline code can be handled in reverse
4753 by simply continuing to single-step. We have already
4754 executed the solib function (backwards), and a few
4755 steps will take us back through the trampoline to the
4756 caller. */
4757 keep_going (ecs);
4758 return;
4759 }
4760
4761 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4762 {
4763 /* We're doing a "next".
4764
4765 Normal (forward) execution: set a breakpoint at the
4766 callee's return address (the address at which the caller
4767 will resume).
4768
4769 	     Reverse (backward) execution: set the step-resume
4770 breakpoint at the start of the function that we just
4771 stepped into (backwards), and continue to there. When we
4772 get there, we'll need to single-step back to the caller. */
4773
4774 if (execution_direction == EXEC_REVERSE)
4775 {
4776 struct symtab_and_line sr_sal;
4777
4778 /* Normal function call return (static or dynamic). */
4779 init_sal (&sr_sal);
4780 sr_sal.pc = ecs->stop_func_start;
4781 sr_sal.pspace = get_frame_program_space (frame);
4782 insert_step_resume_breakpoint_at_sal (gdbarch,
4783 sr_sal, null_frame_id);
4784 }
4785 else
4786 insert_step_resume_breakpoint_at_caller (frame);
4787
4788 keep_going (ecs);
4789 return;
4790 }
4791
4792 /* If we are in a function call trampoline (a stub between the
4793 calling routine and the real function), locate the real
4794 function. That's what tells us (a) whether we want to step
4795 into it at all, and (b) what prologue we want to run to the
4796 end of, if we do step into it. */
4797 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4798 if (real_stop_pc == 0)
4799 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4800 if (real_stop_pc != 0)
4801 ecs->stop_func_start = real_stop_pc;
4802
4803 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4804 {
4805 struct symtab_and_line sr_sal;
4806
4807 init_sal (&sr_sal);
4808 sr_sal.pc = ecs->stop_func_start;
4809 sr_sal.pspace = get_frame_program_space (frame);
4810
4811 insert_step_resume_breakpoint_at_sal (gdbarch,
4812 sr_sal, null_frame_id);
4813 keep_going (ecs);
4814 return;
4815 }
4816
4817 /* If we have line number information for the function we are
4818 thinking of stepping into, step into it.
4819
4820 If there are several symtabs at that PC (e.g. with include
4821 	 files), we just want to know whether *any* of them have line
4822 numbers. find_pc_line handles this. */
4823 {
4824 struct symtab_and_line tmp_sal;
4825
4826 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4827 if (tmp_sal.line != 0)
4828 {
4829 if (execution_direction == EXEC_REVERSE)
4830 handle_step_into_function_backward (gdbarch, ecs);
4831 else
4832 handle_step_into_function (gdbarch, ecs);
4833 return;
4834 }
4835 }
4836
4837 /* If we have no line number and the step-stop-if-no-debug is
4838 set, we stop the step so that the user has a chance to switch
4839 	 to assembly mode.  */
4840 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4841 && step_stop_if_no_debug)
4842 {
4843 ecs->event_thread->control.stop_step = 1;
4844 print_end_stepping_range_reason ();
4845 stop_stepping (ecs);
4846 return;
4847 }
4848
4849 if (execution_direction == EXEC_REVERSE)
4850 {
4851 /* Set a breakpoint at callee's start address.
4852 From there we can step once and be back in the caller. */
4853 struct symtab_and_line sr_sal;
4854
4855 init_sal (&sr_sal);
4856 sr_sal.pc = ecs->stop_func_start;
4857 sr_sal.pspace = get_frame_program_space (frame);
4858 insert_step_resume_breakpoint_at_sal (gdbarch,
4859 sr_sal, null_frame_id);
4860 }
4861 else
4862 /* Set a breakpoint at callee's return address (the address
4863 at which the caller will resume). */
4864 insert_step_resume_breakpoint_at_caller (frame);
4865
4866 keep_going (ecs);
4867 return;
4868 }
4869
4870 /* Reverse stepping through solib trampolines. */
4871
4872 if (execution_direction == EXEC_REVERSE
4873 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4874 {
4875 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4876 || (ecs->stop_func_start == 0
4877 && in_solib_dynsym_resolve_code (stop_pc)))
4878 {
4879 /* Any solib trampoline code can be handled in reverse
4880 by simply continuing to single-step. We have already
4881 executed the solib function (backwards), and a few
4882 steps will take us back through the trampoline to the
4883 caller. */
4884 keep_going (ecs);
4885 return;
4886 }
4887 else if (in_solib_dynsym_resolve_code (stop_pc))
4888 {
4889 /* Stepped backward into the solib dynsym resolver.
4890 Set a breakpoint at its start and continue, then
4891 one more step will take us out. */
4892 struct symtab_and_line sr_sal;
4893
4894 init_sal (&sr_sal);
4895 sr_sal.pc = ecs->stop_func_start;
4896 sr_sal.pspace = get_frame_program_space (frame);
4897 insert_step_resume_breakpoint_at_sal (gdbarch,
4898 sr_sal, null_frame_id);
4899 keep_going (ecs);
4900 return;
4901 }
4902 }
4903
4904 /* If we're in the return path from a shared library trampoline,
4905 we want to proceed through the trampoline when stepping. */
4906 if (gdbarch_in_solib_return_trampoline (gdbarch,
4907 stop_pc, ecs->stop_func_name))
4908 {
4909 /* Determine where this trampoline returns. */
4910 CORE_ADDR real_stop_pc;
4911
4912 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4913
4914 if (debug_infrun)
4915 fprintf_unfiltered (gdb_stdlog,
4916 "infrun: stepped into solib return tramp\n");
4917
4918 /* Only proceed through if we know where it's going. */
4919 if (real_stop_pc)
4920 {
4921 /* And put the step-breakpoint there and go until there. */
4922 struct symtab_and_line sr_sal;
4923
4924 init_sal (&sr_sal); /* initialize to zeroes */
4925 sr_sal.pc = real_stop_pc;
4926 sr_sal.section = find_pc_overlay (sr_sal.pc);
4927 sr_sal.pspace = get_frame_program_space (frame);
4928
4929 /* Do not specify what the fp should be when we stop since
4930 on some machines the prologue is where the new fp value
4931 is established. */
4932 insert_step_resume_breakpoint_at_sal (gdbarch,
4933 sr_sal, null_frame_id);
4934
4935 /* Restart without fiddling with the step ranges or
4936 other state. */
4937 keep_going (ecs);
4938 return;
4939 }
4940 }
4941
4942 stop_pc_sal = find_pc_line (stop_pc, 0);
4943
4944 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4945 the trampoline processing logic; however, there are some trampolines
4946 that have no names, so we should do trampoline handling first. */
4947 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4948 && ecs->stop_func_name == NULL
4949 && stop_pc_sal.line == 0)
4950 {
4951 if (debug_infrun)
4952 fprintf_unfiltered (gdb_stdlog,
4953 "infrun: stepped into undebuggable function\n");
4954
4955 /* The inferior just stepped into, or returned to, an
4956 undebuggable function (where there is no debugging information
4957 and no line number corresponding to the address where the
4958 inferior stopped). Since we want to skip this kind of code,
4959 we keep going until the inferior returns from this
4960 function - unless the user has asked us not to (via
4961 set step-mode) or we no longer know how to get back
4962 to the call site. */
4963 if (step_stop_if_no_debug
4964 || !frame_id_p (frame_unwind_caller_id (frame)))
4965 {
4966 /* If we have no line number and the step-stop-if-no-debug
4967 is set, we stop the step so that the user has a chance to
4968 switch to assembly mode. */
4969 ecs->event_thread->control.stop_step = 1;
4970 print_end_stepping_range_reason ();
4971 stop_stepping (ecs);
4972 return;
4973 }
4974 else
4975 {
4976 /* Set a breakpoint at callee's return address (the address
4977 at which the caller will resume). */
4978 insert_step_resume_breakpoint_at_caller (frame);
4979 keep_going (ecs);
4980 return;
4981 }
4982 }
4983
4984 if (ecs->event_thread->control.step_range_end == 1)
4985 {
4986 /* It is stepi or nexti. We always want to stop stepping after
4987 one instruction. */
4988 if (debug_infrun)
4989 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4990 ecs->event_thread->control.stop_step = 1;
4991 print_end_stepping_range_reason ();
4992 stop_stepping (ecs);
4993 return;
4994 }
4995
4996 if (stop_pc_sal.line == 0)
4997 {
4998 /* We have no line number information. That means to stop
4999 stepping (does this always happen right after one instruction,
5000 when we do "s" in a function with no line numbers,
5001 or can this happen as a result of a return or longjmp?). */
5002 if (debug_infrun)
5003 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5004 ecs->event_thread->control.stop_step = 1;
5005 print_end_stepping_range_reason ();
5006 stop_stepping (ecs);
5007 return;
5008 }
5009
5010 /* Look for "calls" to inlined functions, part one. If the inline
5011 frame machinery detected some skipped call sites, we have entered
5012 a new inline function. */
5013
5014 if (frame_id_eq (get_frame_id (get_current_frame ()),
5015 ecs->event_thread->control.step_frame_id)
5016 && inline_skipped_frames (ecs->ptid))
5017 {
5018 struct symtab_and_line call_sal;
5019
5020 if (debug_infrun)
5021 fprintf_unfiltered (gdb_stdlog,
5022 "infrun: stepped into inlined function\n");
5023
5024 find_frame_sal (get_current_frame (), &call_sal);
5025
5026 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5027 {
5028 /* For "step", we're going to stop. But if the call site
5029 for this inlined function is on the same source line as
5030 we were previously stepping, go down into the function
5031 first. Otherwise stop at the call site. */
5032
5033 if (call_sal.line == ecs->event_thread->current_line
5034 && call_sal.symtab == ecs->event_thread->current_symtab)
5035 step_into_inline_frame (ecs->ptid);
5036
5037 ecs->event_thread->control.stop_step = 1;
5038 print_end_stepping_range_reason ();
5039 stop_stepping (ecs);
5040 return;
5041 }
5042 else
5043 {
5044 /* For "next", we should stop at the call site if it is on a
5045 different source line. Otherwise continue through the
5046 inlined function. */
5047 if (call_sal.line == ecs->event_thread->current_line
5048 && call_sal.symtab == ecs->event_thread->current_symtab)
5049 keep_going (ecs);
5050 else
5051 {
5052 ecs->event_thread->control.stop_step = 1;
5053 print_end_stepping_range_reason ();
5054 stop_stepping (ecs);
5055 }
5056 return;
5057 }
5058 }
5059
5060 /* Look for "calls" to inlined functions, part two. If we are still
5061 in the same real function we were stepping through, but we have
5062 to go further up to find the exact frame ID, we are stepping
5063 through a more inlined call beyond its call site. */
5064
5065 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5066 && !frame_id_eq (get_frame_id (get_current_frame ()),
5067 ecs->event_thread->control.step_frame_id)
5068 && stepped_in_from (get_current_frame (),
5069 ecs->event_thread->control.step_frame_id))
5070 {
5071 if (debug_infrun)
5072 fprintf_unfiltered (gdb_stdlog,
5073 "infrun: stepping through inlined function\n");
5074
5075 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5076 keep_going (ecs);
5077 else
5078 {
5079 ecs->event_thread->control.stop_step = 1;
5080 print_end_stepping_range_reason ();
5081 stop_stepping (ecs);
5082 }
5083 return;
5084 }
5085
5086 if ((stop_pc == stop_pc_sal.pc)
5087 && (ecs->event_thread->current_line != stop_pc_sal.line
5088 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5089 {
5090 /* We are at the start of a different line. So stop. Note that
5091 we don't stop if we step into the middle of a different line.
5092 That is said to make things like for (;;) statements work
5093 better. */
5094 if (debug_infrun)
5095 fprintf_unfiltered (gdb_stdlog,
5096 "infrun: stepped to a different line\n");
5097 ecs->event_thread->control.stop_step = 1;
5098 print_end_stepping_range_reason ();
5099 stop_stepping (ecs);
5100 return;
5101 }
5102
5103 /* We aren't done stepping.
5104
5105 Optimize by setting the stepping range to the line.
5106 (We might not be in the original line, but if we entered a
5107 new line in mid-statement, we continue stepping. This makes
5108 things like for(;;) statements work better.) */
5109
5110 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5111 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5112 set_step_info (frame, stop_pc_sal);
5113
5114 if (debug_infrun)
5115 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5116 keep_going (ecs);
5117 }
5118
5119 /* Is thread TP in the middle of single-stepping? */
5120
5121 static int
5122 currently_stepping (struct thread_info *tp)
5123 {
5124 return ((tp->control.step_range_end
5125 && tp->control.step_resume_breakpoint == NULL)
5126 || tp->control.trap_expected
5127 || tp->stepping_through_solib_after_catch
5128 || bpstat_should_step ());
5129 }
5130
5131 /* Returns true if any thread *but* the one passed in "data" is in the
5132 middle of stepping or of handling a "next". */
5133
5134 static int
5135 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5136 {
5137 if (tp == data)
5138 return 0;
5139
5140 return (tp->control.step_range_end
5141 || tp->control.trap_expected
5142 || tp->stepping_through_solib_after_catch);
5143 }
5144
5145 /* Inferior has stepped into a subroutine call with source code that
5146 we should not step over. Do step to the first line of code in
5147 it. */
5148
5149 static void
5150 handle_step_into_function (struct gdbarch *gdbarch,
5151 struct execution_control_state *ecs)
5152 {
5153 struct symtab *s;
5154 struct symtab_and_line stop_func_sal, sr_sal;
5155
5156 fill_in_stop_func (gdbarch, ecs);
5157
5158 s = find_pc_symtab (stop_pc);
5159 if (s && s->language != language_asm)
5160 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5161 ecs->stop_func_start);
5162
5163 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5164 /* Use the step_resume_break to step until the end of the prologue,
5165 even if that involves jumps (as it seems to on the vax under
5166 4.2). */
5167 /* If the prologue ends in the middle of a source line, continue to
5168 the end of that source line (if it is still within the function).
5169 Otherwise, just go to end of prologue. */
5170 if (stop_func_sal.end
5171 && stop_func_sal.pc != ecs->stop_func_start
5172 && stop_func_sal.end < ecs->stop_func_end)
5173 ecs->stop_func_start = stop_func_sal.end;
5174
5175 /* Architectures which require breakpoint adjustment might not be able
5176 to place a breakpoint at the computed address. If so, the test
5177 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5178 ecs->stop_func_start to an address at which a breakpoint may be
5179 legitimately placed.
5180
5181 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5182 made, GDB will enter an infinite loop when stepping through
5183 optimized code consisting of VLIW instructions which contain
5184 subinstructions corresponding to different source lines. On
5185 FR-V, it's not permitted to place a breakpoint on any but the
5186 first subinstruction of a VLIW instruction. When a breakpoint is
5187 set, GDB will adjust the breakpoint address to the beginning of
5188 the VLIW instruction. Thus, we need to make the corresponding
5189 adjustment here when computing the stop address. */
5190
5191 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5192 {
5193 ecs->stop_func_start
5194 = gdbarch_adjust_breakpoint_address (gdbarch,
5195 ecs->stop_func_start);
5196 }
5197
5198 if (ecs->stop_func_start == stop_pc)
5199 {
5200 /* We are already there: stop now. */
5201 ecs->event_thread->control.stop_step = 1;
5202 print_end_stepping_range_reason ();
5203 stop_stepping (ecs);
5204 return;
5205 }
5206 else
5207 {
5208 /* Put the step-breakpoint there and go until there. */
5209 init_sal (&sr_sal); /* initialize to zeroes */
5210 sr_sal.pc = ecs->stop_func_start;
5211 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5212 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5213
5214 /* Do not specify what the fp should be when we stop since on
5215 some machines the prologue is where the new fp value is
5216 established. */
5217 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5218
5219 /* And make sure stepping stops right away then. */
5220 ecs->event_thread->control.step_range_end
5221 = ecs->event_thread->control.step_range_start;
5222 }
5223 keep_going (ecs);
5224 }
5225
5226 /* Inferior has stepped backward into a subroutine call with source
5227 code that we should not step over. Do step to the beginning of the
5228 last line of code in it. */
5229
5230 static void
5231 handle_step_into_function_backward (struct gdbarch *gdbarch,
5232 struct execution_control_state *ecs)
5233 {
5234 struct symtab *s;
5235 struct symtab_and_line stop_func_sal;
5236
5237 fill_in_stop_func (gdbarch, ecs);
5238
5239 s = find_pc_symtab (stop_pc);
5240 if (s && s->language != language_asm)
5241 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5242 ecs->stop_func_start);
5243
5244 stop_func_sal = find_pc_line (stop_pc, 0);
5245
5246 /* OK, we're just going to keep stepping here. */
5247 if (stop_func_sal.pc == stop_pc)
5248 {
5249 /* We're there already. Just stop stepping now. */
5250 ecs->event_thread->control.stop_step = 1;
5251 print_end_stepping_range_reason ();
5252 stop_stepping (ecs);
5253 }
5254 else
5255 {
5256 /* Else just reset the step range and keep going.
5257 No step-resume breakpoint; they don't work for
5258 epilogues, which can have multiple entry paths. */
5259 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5260 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5261 keep_going (ecs);
5262 }
5263 return;
5264 }
5265
5266 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5267 This is used both to step over functions and to skip over code. */
5268
5269 static void
5270 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5271 struct symtab_and_line sr_sal,
5272 struct frame_id sr_id,
5273 enum bptype sr_type)
5274 {
5275 /* There should never be more than one step-resume or longjmp-resume
5276 breakpoint per thread, so we should never be setting a new
5277 step_resume_breakpoint when one is already active. */
5278 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5279 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5280
5281 if (debug_infrun)
5282 fprintf_unfiltered (gdb_stdlog,
5283 "infrun: inserting step-resume breakpoint at %s\n",
5284 paddress (gdbarch, sr_sal.pc));
5285
5286 inferior_thread ()->control.step_resume_breakpoint
5287 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5288 }
5289
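/* Insert a plain step-resume breakpoint (type bp_step_resume) at SR_SAL
with frame ID SR_ID. A thin wrapper around
insert_step_resume_breakpoint_at_sal_1. */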
5290 void
5291 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5292 struct symtab_and_line sr_sal,
5293 struct frame_id sr_id)
5294 {
5295 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5296 sr_sal, sr_id,
5297 bp_step_resume);
5298 }
5299
5300 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5301 This is used to skip a potential signal handler.
5302
5303 This is called with the interrupted function's frame. The signal
5304 handler, when it returns, will resume the interrupted function at
5305 RETURN_FRAME.pc. */
5306
5307 static void
5308 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5309 {
5310 struct symtab_and_line sr_sal;
5311 struct gdbarch *gdbarch;
5312
5313 gdb_assert (return_frame != NULL);
5314 init_sal (&sr_sal); /* initialize to zeros */
5315
5316 gdbarch = get_frame_arch (return_frame);
5317 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5318 sr_sal.section = find_pc_overlay (sr_sal.pc);
5319 sr_sal.pspace = get_frame_program_space (return_frame);
5320
5321 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5322 get_stack_frame_id (return_frame),
5323 bp_hp_step_resume);
5324 }
5325
5326 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5327 is used to skip a function after stepping into it (for "next" or if
5328 the called function has no debugging information).
5329
5330 The current function has almost always been reached by single
5331 stepping a call or return instruction. NEXT_FRAME belongs to the
5332 current function, and the breakpoint will be set at the caller's
5333 resume address.
5334
5335 This is a separate function rather than reusing
5336 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5337 get_prev_frame, which may stop prematurely (see the implementation
5338 of frame_unwind_caller_id for an example). */
5339
5340 static void
5341 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5342 {
5343 struct symtab_and_line sr_sal;
5344 struct gdbarch *gdbarch;
5345
5346 /* We shouldn't have gotten here if we don't know where the call site
5347 is. */
5348 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5349
5350 init_sal (&sr_sal); /* initialize to zeros */
5351
5352 gdbarch = frame_unwind_caller_arch (next_frame);
5353 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5354 frame_unwind_caller_pc (next_frame));
5355 sr_sal.section = find_pc_overlay (sr_sal.pc);
5356 sr_sal.pspace = frame_unwind_program_space (next_frame);
5357
5358 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5359 frame_unwind_caller_id (next_frame));
5360 }
5361
5362 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5363 new breakpoint at the target of a jmp_buf. The handling of
5364 longjmp-resume uses the same mechanisms used for handling
5365 "step-resume" breakpoints. */
5366
5367 static void
5368 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5369 {
5370 /* There should never be more than one step-resume or longjmp-resume
5371 breakpoint per thread, so we should never be setting a new
5372 longjmp_resume_breakpoint when one is already active. */
5373 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5374
5375 if (debug_infrun)
5376 fprintf_unfiltered (gdb_stdlog,
5377 "infrun: inserting longjmp-resume breakpoint at %s\n",
5378 paddress (gdbarch, pc));
5379
5380 inferior_thread ()->control.step_resume_breakpoint =
5381 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5382 }
5383
5384 /* Insert an exception resume breakpoint. TP is the thread throwing
5385 the exception. The block B is the block of the unwinder debug hook
5386 function. FRAME is the frame corresponding to the call to this
5387 function. SYM is the symbol of the function argument holding the
5388 target PC of the exception. */
5389
5390 static void
5391 insert_exception_resume_breakpoint (struct thread_info *tp,
5392 struct block *b,
5393 struct frame_info *frame,
5394 struct symbol *sym)
5395 {
5396 struct gdb_exception e;
5397
5398 /* We want to ignore errors here. */
5399 TRY_CATCH (e, RETURN_MASK_ERROR)
5400 {
5401 struct symbol *vsym;
5402 struct value *value;
5403 CORE_ADDR handler;
5404 struct breakpoint *bp;
5405
5406 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5407 value = read_var_value (vsym, frame);
5408 /* If the value was optimized out, revert to the old behavior. */
5409 if (! value_optimized_out (value))
5410 {
5411 handler = value_as_address (value);
5412
5413 if (debug_infrun)
5414 fprintf_unfiltered (gdb_stdlog,
5415 "infrun: exception resume at %lx\n",
5416 (unsigned long) handler);
5417
5418 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5419 handler, bp_exception_resume);
5420 bp->thread = tp->num;
5421 inferior_thread ()->control.exception_resume_breakpoint = bp;
5422 }
5423 }
5424 }
5425
5426 /* This is called when an exception has been intercepted. Check to
5427 see whether the exception's destination is of interest, and if so,
5428 set an exception resume breakpoint there. */
5429
5430 static void
5431 check_exception_resume (struct execution_control_state *ecs,
5432 struct frame_info *frame, struct symbol *func)
5433 {
5434 struct gdb_exception e;
5435
5436 TRY_CATCH (e, RETURN_MASK_ERROR)
5437 {
5438 struct block *b;
5439 struct dict_iterator iter;
5440 struct symbol *sym;
5441 int argno = 0;
5442
5443 /* The exception breakpoint is a thread-specific breakpoint on
5444 the unwinder's debug hook, declared as:
5445
5446 void _Unwind_DebugHook (void *cfa, void *handler);
5447
5448 The CFA argument indicates the frame to which control is
5449 about to be transferred. HANDLER is the destination PC.
5450
5451 We ignore the CFA and set a temporary breakpoint at HANDLER.
5452 This is not extremely efficient but it avoids issues in gdb
5453 with computing the DWARF CFA, and it also works even in weird
5454 cases such as throwing an exception from inside a signal
5455 handler. */
5456
5457 b = SYMBOL_BLOCK_VALUE (func);
5458 ALL_BLOCK_SYMBOLS (b, iter, sym)
5459 {
5460 if (!SYMBOL_IS_ARGUMENT (sym))
5461 continue;
5462
5463 if (argno == 0)
5464 ++argno;
5465 else
5466 {
5467 insert_exception_resume_breakpoint (ecs->event_thread,
5468 b, frame, sym);
5469 break;
5470 }
5471 }
5472 }
5473 }
5474
5475 static void
5476 stop_stepping (struct execution_control_state *ecs)
5477 {
5478 if (debug_infrun)
5479 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5480
5481 /* Let callers know we don't want to wait for the inferior anymore. */
5482 ecs->wait_some_more = 0;
5483 }
5484
5485 /* This function handles various cases where we need to continue
5486 waiting for the inferior. */
5487 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5488
5489 static void
5490 keep_going (struct execution_control_state *ecs)
5491 {
5492 /* Make sure normal_stop is called if we get a QUIT handled before
5493 reaching resume. */
5494 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5495
5496 /* Save the pc before execution, to compare with pc after stop. */
5497 ecs->event_thread->prev_pc
5498 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5499
5500 /* If we did not do break;, it means we should keep running the
5501 inferior and not return to the debugger. */
5502
5503 if (ecs->event_thread->control.trap_expected
5504 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5505 {
5506 /* We took a signal (which we are supposed to pass through to
5507 the inferior, else we'd not get here) and we haven't yet
5508 gotten our trap. Simply continue. */
5509
5510 discard_cleanups (old_cleanups);
5511 resume (currently_stepping (ecs->event_thread),
5512 ecs->event_thread->suspend.stop_signal);
5513 }
5514 else
5515 {
5516 /* Either the trap was not expected, but we are continuing
5517 anyway (the user asked that this signal be passed to the
5518 child)
5519 -- or --
5520 The signal was SIGTRAP, i.e. it was our signal, but we
5521 decided we should resume from it.
5522
5523 We're going to run this baby now!
5524
5525 Note that insert_breakpoints won't try to re-insert
5526 already inserted breakpoints. Therefore, we don't
5527 care if breakpoints were already inserted, or not. */
5528
5529 if (ecs->event_thread->stepping_over_breakpoint)
5530 {
5531 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5532
5533 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5534 /* Since we can't do a displaced step, we have to remove
5535 the breakpoint while we step it. To keep things
5536 simple, we remove them all. */
5537 remove_breakpoints ();
5538 }
5539 else
5540 {
5541 struct gdb_exception e;
5542
5543 /* Stop stepping when inserting breakpoints
5544 has failed. */
5545 TRY_CATCH (e, RETURN_MASK_ERROR)
5546 {
5547 insert_breakpoints ();
5548 }
5549 if (e.reason < 0)
5550 {
5551 exception_print (gdb_stderr, e);
5552 stop_stepping (ecs);
5553 return;
5554 }
5555 }
5556
5557 ecs->event_thread->control.trap_expected
5558 = ecs->event_thread->stepping_over_breakpoint;
5559
5560 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5561 specifies that such a signal should be delivered to the
5562 target program).
5563
5564 Typically, this would occur when a user is debugging a
5565 target monitor on a simulator: the target monitor sets a
5566 breakpoint; the simulator encounters this breakpoint and
5567 halts the simulation, handing control to GDB; GDB, noting
5568 that the breakpoint isn't valid, returns control back to the
5569 simulator; the simulator then delivers the hardware
5570 equivalent of a SIGNAL_TRAP to the program being debugged. */
5571
5572 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5573 && !signal_program[ecs->event_thread->suspend.stop_signal])
5574 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5575
5576 discard_cleanups (old_cleanups);
5577 resume (currently_stepping (ecs->event_thread),
5578 ecs->event_thread->suspend.stop_signal);
5579 }
5580
5581 prepare_to_wait (ecs);
5582 }
5583
5584 /* This function normally comes after a resume, before
5585 handle_inferior_event exits. It takes care of any last bits of
5586 housekeeping, and sets the all-important wait_some_more flag. */
5587
5588 static void
5589 prepare_to_wait (struct execution_control_state *ecs)
5590 {
5591 if (debug_infrun)
5592 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5593
5594 /* This is the old end of the while loop. Let everybody know we
5595 want to wait for the inferior some more and get called again
5596 soon. */
5597 ecs->wait_some_more = 1;
5598 }
5599
5600 /* Several print_*_reason functions to print why the inferior has stopped.
5601 We always print something when the inferior exits, or receives a signal.
5602 The rest of the cases are dealt with later on in normal_stop and
5603 print_it_typical. Ideally there should be a call to one of these
5604 print_*_reason functions from handle_inferior_event each time
5605 stop_stepping is called. */
5606
5607 /* We are done with a step/next/si/ni command; print why the inferior
5608 has stopped. The CLI prints nothing here; MI emits the
5609 "end-stepping-range" reason, but only when we are not in the middle
5610 of doing a "step n" operation for n > 1. */
5611
5612 static void
5613 print_end_stepping_range_reason (void)
5614 {
5615 if ((!inferior_thread ()->step_multi
5616 || !inferior_thread ()->control.stop_step)
5617 && ui_out_is_mi_like_p (current_uiout))
5618 ui_out_field_string (current_uiout, "reason",
5619 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5620 }
5621
5622 /* The inferior was terminated by a signal, print why it stopped. */
5623
5624 static void
5625 print_signal_exited_reason (enum target_signal siggnal)
5626 {
5627 struct ui_out *uiout = current_uiout;
5628
5629 annotate_signalled ();
5630 if (ui_out_is_mi_like_p (uiout))
5631 ui_out_field_string
5632 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5633 ui_out_text (uiout, "\nProgram terminated with signal ");
5634 annotate_signal_name ();
5635 ui_out_field_string (uiout, "signal-name",
5636 target_signal_to_name (siggnal));
5637 annotate_signal_name_end ();
5638 ui_out_text (uiout, ", ");
5639 annotate_signal_string ();
5640 ui_out_field_string (uiout, "signal-meaning",
5641 target_signal_to_string (siggnal));
5642 annotate_signal_string_end ();
5643 ui_out_text (uiout, ".\n");
5644 ui_out_text (uiout, "The program no longer exists.\n");
5645 }
5646
5647 /* The inferior program is finished, print why it stopped. */
5648
5649 static void
5650 print_exited_reason (int exitstatus)
5651 {
5652 struct inferior *inf = current_inferior ();
5653 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5654 struct ui_out *uiout = current_uiout;
5655
5656 annotate_exited (exitstatus);
5657 if (exitstatus)
5658 {
5659 if (ui_out_is_mi_like_p (uiout))
5660 ui_out_field_string (uiout, "reason",
5661 async_reason_lookup (EXEC_ASYNC_EXITED));
5662 ui_out_text (uiout, "[Inferior ");
5663 ui_out_text (uiout, plongest (inf->num));
5664 ui_out_text (uiout, " (");
5665 ui_out_text (uiout, pidstr);
5666 ui_out_text (uiout, ") exited with code ");
5667 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5668 ui_out_text (uiout, "]\n");
5669 }
5670 else
5671 {
5672 if (ui_out_is_mi_like_p (uiout))
5673 ui_out_field_string
5674 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5675 ui_out_text (uiout, "[Inferior ");
5676 ui_out_text (uiout, plongest (inf->num));
5677 ui_out_text (uiout, " (");
5678 ui_out_text (uiout, pidstr);
5679 ui_out_text (uiout, ") exited normally]\n");
5680 }
5681 /* Support the --return-child-result option. */
5682 return_child_result_value = exitstatus;
5683 }
5684
5685 /* Signal received, print why the inferior has stopped. The signal table
5686 tells us to print about it. */
5687
5688 static void
5689 print_signal_received_reason (enum target_signal siggnal)
5690 {
5691 struct ui_out *uiout = current_uiout;
5692
5693 annotate_signal ();
5694
5695 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5696 {
5697 struct thread_info *t = inferior_thread ();
5698
5699 ui_out_text (uiout, "\n[");
5700 ui_out_field_string (uiout, "thread-name",
5701 target_pid_to_str (t->ptid));
5702 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5703 ui_out_text (uiout, " stopped");
5704 }
5705 else
5706 {
5707 ui_out_text (uiout, "\nProgram received signal ");
5708 annotate_signal_name ();
5709 if (ui_out_is_mi_like_p (uiout))
5710 ui_out_field_string
5711 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5712 ui_out_field_string (uiout, "signal-name",
5713 target_signal_to_name (siggnal));
5714 annotate_signal_name_end ();
5715 ui_out_text (uiout, ", ");
5716 annotate_signal_string ();
5717 ui_out_field_string (uiout, "signal-meaning",
5718 target_signal_to_string (siggnal));
5719 annotate_signal_string_end ();
5720 }
5721 ui_out_text (uiout, ".\n");
5722 }
5723
5724 /* Reverse execution: target ran out of history info, print why the inferior
5725 has stopped. */
5726
5727 static void
5728 print_no_history_reason (void)
5729 {
5730 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5731 }
5732
5733 /* Here to return control to GDB when the inferior stops for real.
5734 Print appropriate messages, remove breakpoints, give terminal our modes.
5735
5736 STOP_PRINT_FRAME nonzero means print the executing frame
5737 (pc, function, args, file, line number and line text).
5738 BREAKPOINTS_FAILED nonzero means stop was due to error
5739 attempting to insert breakpoints. */
5740
5741 void
5742 normal_stop (void)
5743 {
5744 struct target_waitstatus last;
5745 ptid_t last_ptid;
5746 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5747
5748 get_last_target_status (&last_ptid, &last);
5749
5750 /* If an exception is thrown from this point on, make sure to
5751 propagate GDB's knowledge of the executing state to the
5752 frontend/user running state. A QUIT is an easy exception to see
5753 here, so do this before any filtered output. */
5754 if (!non_stop)
5755 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5756 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5757 && last.kind != TARGET_WAITKIND_EXITED)
5758 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5759
5760 /* In non-stop mode, we don't want GDB to switch threads behind the
5761 user's back, to avoid races where the user is typing a command to
5762 apply to thread x, but GDB switches to thread y before the user
5763 finishes entering the command. */
5764
5765 /* As with the notification of thread events, we want to delay
5766 notifying the user that we've switched thread context until
5767 the inferior actually stops.
5768
5769 There's no point in saying anything if the inferior has exited.
5770 Note that SIGNALLED here means "exited with a signal", not
5771 "received a signal". */
5772 if (!non_stop
5773 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5774 && target_has_execution
5775 && last.kind != TARGET_WAITKIND_SIGNALLED
5776 && last.kind != TARGET_WAITKIND_EXITED)
5777 {
5778 target_terminal_ours_for_output ();
5779 printf_filtered (_("[Switching to %s]\n"),
5780 target_pid_to_str (inferior_ptid));
5781 annotate_thread_changed ();
5782 previous_inferior_ptid = inferior_ptid;
5783 }
5784
5785 if (!breakpoints_always_inserted_mode () && target_has_execution)
5786 {
5787 if (remove_breakpoints ())
5788 {
5789 target_terminal_ours_for_output ();
5790 printf_filtered (_("Cannot remove breakpoints because "
5791 "program is no longer writable.\nFurther "
5792 "execution is probably impossible.\n"));
5793 }
5794 }
5795
5796 /* If an auto-display called a function and that got a signal,
5797 delete that auto-display to avoid an infinite recursion. */
5798
5799 if (stopped_by_random_signal)
5800 disable_current_display ();
5801
5802 /* Don't print a message if in the middle of doing a "step n"
5803 operation for n > 1. */
5804 if (target_has_execution
5805 && last.kind != TARGET_WAITKIND_SIGNALLED
5806 && last.kind != TARGET_WAITKIND_EXITED
5807 && inferior_thread ()->step_multi
5808 && inferior_thread ()->control.stop_step)
5809 goto done;
5810
5811 target_terminal_ours ();
5812
5813 /* Set the current source location. This will also happen if we
5814 display the frame below, but the current SAL will be incorrect
5815 during a user hook-stop function. */
5816 if (has_stack_frames () && !stop_stack_dummy)
5817 set_current_sal_from_frame (get_current_frame (), 1);
5818
5819 /* Let the user/frontend see the threads as stopped. */
5820 do_cleanups (old_chain);
5821
5822 /* Look up the hook_stop and run it (the CLI internally handles the
5823 problem of stop_command's pre-hook not existing). */
5824 if (stop_command)
5825 catch_errors (hook_stop_stub, stop_command,
5826 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5827
5828 if (!has_stack_frames ())
5829 goto done;
5830
5831 if (last.kind == TARGET_WAITKIND_SIGNALLED
5832 || last.kind == TARGET_WAITKIND_EXITED)
5833 goto done;
5834
5835 /* Select innermost stack frame - i.e., current frame is frame 0,
5836 and current location is based on that.
5837 Don't do this on return from a stack dummy routine,
5838 or if the program has exited. */
5839
5840 if (!stop_stack_dummy)
5841 {
5842 select_frame (get_current_frame ());
5843
5844 /* Print current location without a level number, if
5845 we have changed functions or hit a breakpoint.
5846 Print source line if we have one.
5847 bpstat_print() contains the logic deciding in detail
5848 what to print, based on the event(s) that just occurred. */
5849
5850 /* If --batch-silent is enabled then there's no need to print the current
5851 source location, and trying to do so risks causing an error message
5852 about missing source files. */
5853 if (stop_print_frame && !batch_silent)
5854 {
5855 int bpstat_ret;
5856 int source_flag;
5857 int do_frame_printing = 1;
5858 struct thread_info *tp = inferior_thread ();
5859
5860 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5861 switch (bpstat_ret)
5862 {
5863 case PRINT_UNKNOWN:
5864 /* If we had hit a shared library event breakpoint,
5865 bpstat_print would print out this message. If we hit
5866 an OS-level shared library event, do the same
5867 thing. */
5868 if (last.kind == TARGET_WAITKIND_LOADED)
5869 {
5870 printf_filtered (_("Stopped due to shared library event\n"));
5871 source_flag = SRC_LINE; /* something bogus */
5872 do_frame_printing = 0;
5873 break;
5874 }
5875
5876 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5877 (or should) carry around the function and does (or
5878 should) use that when doing a frame comparison. */
5879 if (tp->control.stop_step
5880 && frame_id_eq (tp->control.step_frame_id,
5881 get_frame_id (get_current_frame ()))
5882 && step_start_function == find_pc_function (stop_pc))
5883 source_flag = SRC_LINE; /* Finished step, just
5884 print source line. */
5885 else
5886 source_flag = SRC_AND_LOC; /* Print location and
5887 source line. */
5888 break;
5889 case PRINT_SRC_AND_LOC:
5890 source_flag = SRC_AND_LOC; /* Print location and
5891 source line. */
5892 break;
5893 case PRINT_SRC_ONLY:
5894 source_flag = SRC_LINE;
5895 break;
5896 case PRINT_NOTHING:
5897 source_flag = SRC_LINE; /* something bogus */
5898 do_frame_printing = 0;
5899 break;
5900 default:
5901 internal_error (__FILE__, __LINE__, _("Unknown value."));
5902 }
5903
5904 /* The behavior of this routine with respect to the source
5905 flag is:
5906 SRC_LINE: Print only source line
5907 LOCATION: Print only location
5908 SRC_AND_LOC: Print location and source line. */
5909 if (do_frame_printing)
5910 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5911
5912 /* Display the auto-display expressions. */
5913 do_displays ();
5914 }
5915 }
5916
5917 /* Save the function value return registers, if we care.
5918 We might be about to restore their previous contents. */
5919 if (inferior_thread ()->control.proceed_to_finish
5920 && execution_direction != EXEC_REVERSE)
5921 {
5922 /* This should not be necessary. */
5923 if (stop_registers)
5924 regcache_xfree (stop_registers);
5925
5926 /* NB: The copy goes through to the target picking up the value of
5927 all the registers. */
5928 stop_registers = regcache_dup (get_current_regcache ());
5929 }
5930
5931 if (stop_stack_dummy == STOP_STACK_DUMMY)
5932 {
5933 /* Pop the empty frame that contains the stack dummy.
5934 This also restores inferior state prior to the call
5935 (struct infcall_suspend_state). */
5936 struct frame_info *frame = get_current_frame ();
5937
5938 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5939 frame_pop (frame);
5940 /* frame_pop() calls reinit_frame_cache as the last thing it
5941 does which means there's currently no selected frame. We
5942 don't need to re-establish a selected frame if the dummy call
5943 returns normally, that will be done by
5944 restore_infcall_control_state. However, we do have to handle
5945 the case where the dummy call is returning after being
5946 stopped (e.g. the dummy call previously hit a breakpoint).
5947 We can't know which case we have so just always re-establish
5948 a selected frame here. */
5949 select_frame (get_current_frame ());
5950 }
5951
5952 done:
5953 annotate_stopped ();
5954
5955 /* Suppress the stop observer if we're in the middle of:
5956
5957 - a step n (n > 1), as there are still more steps to be done.
5958
5959 - a "finish" command, as the observer will be called in
5960 finish_command_continuation, so it can include the inferior
5961 function's return value.
5962
5963 - calling an inferior function, as we pretend the inferior didn't
5964 run at all. The return value of the call is handled by the
5965 expression evaluator, through call_function_by_hand. */
5966
5967 if (!target_has_execution
5968 || last.kind == TARGET_WAITKIND_SIGNALLED
5969 || last.kind == TARGET_WAITKIND_EXITED
5970 || (!inferior_thread ()->step_multi
5971 && !(inferior_thread ()->control.stop_bpstat
5972 && inferior_thread ()->control.proceed_to_finish)
5973 && !inferior_thread ()->control.in_infcall))
5974 {
5975 if (!ptid_equal (inferior_ptid, null_ptid))
5976 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
5977 stop_print_frame);
5978 else
5979 observer_notify_normal_stop (NULL, stop_print_frame);
5980 }
5981
5982 if (target_has_execution)
5983 {
5984 if (last.kind != TARGET_WAITKIND_SIGNALLED
5985 && last.kind != TARGET_WAITKIND_EXITED)
5986 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5987 Delete any breakpoint that is to be deleted at the next stop. */
5988 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
5989 }
5990
5991 /* Try to get rid of automatically added inferiors that are no
5992 longer needed. Keeping those around slows down things linearly.
5993 Note that this never removes the current inferior. */
5994 prune_inferiors ();
5995 }
5996
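/* Run the "stop" command's pre-hook. CMD is the stop_command command
list element; called via catch_errors from normal_stop above. */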
5997 static int
5998 hook_stop_stub (void *cmd)
5999 {
6000 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6001 return (0);
6002 }
6003 \f
6004 int
6005 signal_stop_state (int signo)
6006 {
6007 return signal_stop[signo];
6008 }
6009
6010 int
6011 signal_print_state (int signo)
6012 {
6013 return signal_print[signo];
6014 }
6015
6016 int
6017 signal_pass_state (int signo)
6018 {
6019 return signal_program[signo];
6020 }
6021
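/* Recompute the cached signal_pass entry for SIGNO, or for all signals
if SIGNO is -1. A signal is "pass" only when it neither stops nor
prints and is delivered to the program; signal_pass is the table that
gets handed to target_pass_signals. */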
6022 static void
6023 signal_cache_update (int signo)
6024 {
6025 if (signo == -1)
6026 {
6027 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6028 signal_cache_update (signo);
6029
6030 return;
6031 }
6032
6033 signal_pass[signo] = (signal_stop[signo] == 0
6034 && signal_print[signo] == 0
6035 && signal_program[signo] == 1);
6036 }
6037
6038 int
6039 signal_stop_update (int signo, int state)
6040 {
6041 int ret = signal_stop[signo];
6042
6043 signal_stop[signo] = state;
6044 signal_cache_update (signo);
6045 return ret;
6046 }
6047
6048 int
6049 signal_print_update (int signo, int state)
6050 {
6051 int ret = signal_print[signo];
6052
6053 signal_print[signo] = state;
6054 signal_cache_update (signo);
6055 return ret;
6056 }
6057
6058 int
6059 signal_pass_update (int signo, int state)
6060 {
6061 int ret = signal_program[signo];
6062
6063 signal_program[signo] = state;
6064 signal_cache_update (signo);
6065 return ret;
6066 }
6067
6068 static void
6069 sig_print_header (void)
6070 {
6071 printf_filtered (_("Signal Stop\tPrint\tPass "
6072 "to program\tDescription\n"));
6073 }
6074
6075 static void
6076 sig_print_info (enum target_signal oursig)
6077 {
6078 const char *name = target_signal_to_name (oursig);
6079 int name_padding = 13 - strlen (name);
6080
6081 if (name_padding <= 0)
6082 name_padding = 0;
6083
6084 printf_filtered ("%s", name);
6085 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6086 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6087 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6088 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6089 printf_filtered ("%s\n", target_signal_to_string (oursig));
6090 }
6091
6092 /* Specify how various signals in the inferior should be handled. */
6093
6094 static void
6095 handle_command (char *args, int from_tty)
6096 {
6097 char **argv;
6098 int digits, wordlen;
6099 int sigfirst, signum, siglast;
6100 enum target_signal oursig;
6101 int allsigs;
6102 int nsigs;
6103 unsigned char *sigs;
6104 struct cleanup *old_chain;
6105
6106 if (args == NULL)
6107 {
6108 error_no_arg (_("signal to handle"));
6109 }
6110
6111 /* Allocate and zero an array of flags for which signals to handle. */
6112
6113 nsigs = (int) TARGET_SIGNAL_LAST;
6114 sigs = (unsigned char *) alloca (nsigs);
6115 memset (sigs, 0, nsigs);
6116
6117 /* Break the command line up into args. */
6118
6119 argv = gdb_buildargv (args);
6120 old_chain = make_cleanup_freeargv (argv);
6121
6122 /* Walk through the args, looking for signal oursigs, signal names, and
6123 actions. Signal numbers and signal names may be interspersed with
6124 actions, with the actions being performed for all signals cumulatively
6125 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
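/* For example, "handle SIGUSR1 nostop noprint pass" applies three
actions to one signal, while "handle SIGUSR1 SIGUSR2 stop print"
applies two actions to two signals. */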
6126
6127 while (*argv != NULL)
6128 {
6129 wordlen = strlen (*argv);
6130 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6131 {;
6132 }
6133 allsigs = 0;
6134 sigfirst = siglast = -1;
6135
6136 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6137 {
6138 /* Apply action to all signals except those used by the
6139 debugger. Silently skip those. */
6140 allsigs = 1;
6141 sigfirst = 0;
6142 siglast = nsigs - 1;
6143 }
6144 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6145 {
6146 SET_SIGS (nsigs, sigs, signal_stop);
6147 SET_SIGS (nsigs, sigs, signal_print);
6148 }
6149 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6150 {
6151 UNSET_SIGS (nsigs, sigs, signal_program);
6152 }
6153 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6154 {
6155 SET_SIGS (nsigs, sigs, signal_print);
6156 }
6157 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6158 {
6159 SET_SIGS (nsigs, sigs, signal_program);
6160 }
6161 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6162 {
6163 UNSET_SIGS (nsigs, sigs, signal_stop);
6164 }
6165 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6166 {
6167 SET_SIGS (nsigs, sigs, signal_program);
6168 }
6169 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6170 {
6171 UNSET_SIGS (nsigs, sigs, signal_print);
6172 UNSET_SIGS (nsigs, sigs, signal_stop);
6173 }
6174 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6175 {
6176 UNSET_SIGS (nsigs, sigs, signal_program);
6177 }
6178 else if (digits > 0)
6179 {
6180 /* It is numeric. The numeric signal refers to our own
6181 internal signal numbering from target.h, not to a host/target
6182 signal number. This is a feature; users really should be
6183 using symbolic names anyway, and the common ones like
6184 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6185
6186 sigfirst = siglast = (int)
6187 target_signal_from_command (atoi (*argv));
6188 if ((*argv)[digits] == '-')
6189 {
6190 siglast = (int)
6191 target_signal_from_command (atoi ((*argv) + digits + 1));
6192 }
6193 if (sigfirst > siglast)
6194 {
6195 /* Bet he didn't figure we'd think of this case... */
6196 signum = sigfirst;
6197 sigfirst = siglast;
6198 siglast = signum;
6199 }
6200 }
6201 else
6202 {
6203 oursig = target_signal_from_name (*argv);
6204 if (oursig != TARGET_SIGNAL_UNKNOWN)
6205 {
6206 sigfirst = siglast = (int) oursig;
6207 }
6208 else
6209 {
6210 /* Not a number and not a recognized flag word => complain. */
6211 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6212 }
6213 }
6214
6215 /* If any signal numbers or symbol names were found, set flags for
6216 which signals to apply actions to. */
6217
6218 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6219 {
6220 switch ((enum target_signal) signum)
6221 {
6222 case TARGET_SIGNAL_TRAP:
6223 case TARGET_SIGNAL_INT:
6224 if (!allsigs && !sigs[signum])
6225 {
6226 if (query (_("%s is used by the debugger.\n\
6227 Are you sure you want to change it? "),
6228 target_signal_to_name ((enum target_signal) signum)))
6229 {
6230 sigs[signum] = 1;
6231 }
6232 else
6233 {
6234 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6235 gdb_flush (gdb_stdout);
6236 }
6237 }
6238 break;
6239 case TARGET_SIGNAL_0:
6240 case TARGET_SIGNAL_DEFAULT:
6241 case TARGET_SIGNAL_UNKNOWN:
6242 /* Make sure that "all" doesn't print these. */
6243 break;
6244 default:
6245 sigs[signum] = 1;
6246 break;
6247 }
6248 }
6249
6250 argv++;
6251 }
6252
6253 for (signum = 0; signum < nsigs; signum++)
6254 if (sigs[signum])
6255 {
6256 signal_cache_update (-1);
6257 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6258
6259 if (from_tty)
6260 {
6261 /* Show the results. */
6262 sig_print_header ();
6263 for (; signum < nsigs; signum++)
6264 if (sigs[signum])
6265 sig_print_info (signum);
6266 }
6267
6268 break;
6269 }
6270
6271 do_cleanups (old_chain);
6272 }
6273
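/* XDB-compatibility front end for "handle": translate an XDB-style
"SIGNAL {s|i|r|Q}" request into the equivalent "handle" arguments and
dispatch it through handle_command. */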
6274 static void
6275 xdb_handle_command (char *args, int from_tty)
6276 {
6277 char **argv;
6278 struct cleanup *old_chain;
6279
6280 if (args == NULL)
6281 error_no_arg (_("xdb command"));
6282
6283 /* Break the command line up into args. */
6284
6285 argv = gdb_buildargv (args);
6286 old_chain = make_cleanup_freeargv (argv);
6287 if (argv[1] != (char *) NULL)
6288 {
6289 char *argBuf;
6290 int bufLen;
6291
6292 bufLen = strlen (argv[0]) + 20;
6293 argBuf = (char *) xmalloc (bufLen);
6294 if (argBuf)
6295 {
6296 int validFlag = 1;
6297 enum target_signal oursig;
6298
6299 oursig = target_signal_from_name (argv[0]);
6300 memset (argBuf, 0, bufLen);
6301 if (strcmp (argv[1], "Q") == 0)
6302 sprintf (argBuf, "%s %s", argv[0], "noprint");
6303 else
6304 {
6305 if (strcmp (argv[1], "s") == 0)
6306 {
6307 if (!signal_stop[oursig])
6308 sprintf (argBuf, "%s %s", argv[0], "stop");
6309 else
6310 sprintf (argBuf, "%s %s", argv[0], "nostop");
6311 }
6312 else if (strcmp (argv[1], "i") == 0)
6313 {
6314 if (!signal_program[oursig])
6315 sprintf (argBuf, "%s %s", argv[0], "pass");
6316 else
6317 sprintf (argBuf, "%s %s", argv[0], "nopass");
6318 }
6319 else if (strcmp (argv[1], "r") == 0)
6320 {
6321 if (!signal_print[oursig])
6322 sprintf (argBuf, "%s %s", argv[0], "print");
6323 else
6324 sprintf (argBuf, "%s %s", argv[0], "noprint");
6325 }
6326 else
6327 validFlag = 0;
6328 }
6329 if (validFlag)
6330 handle_command (argBuf, from_tty);
6331 else
6332 printf_filtered (_("Invalid signal handling flag.\n"));
6333 if (argBuf)
6334 xfree (argBuf);
6335 }
6336 }
6337 do_cleanups (old_chain);
6338 }
6339
6340 /* Print current contents of the tables set by the handle command.
6341 It is possible we should just be printing signals actually used
6342 by the current target (but for things to work right when switching
6343 targets, all signals should be in the signal tables). */
6344
6345 static void
6346 signals_info (char *signum_exp, int from_tty)
6347 {
6348 enum target_signal oursig;
6349
6350 sig_print_header ();
6351
6352 if (signum_exp)
6353 {
6354 /* First see if this is a symbol name. */
6355 oursig = target_signal_from_name (signum_exp);
6356 if (oursig == TARGET_SIGNAL_UNKNOWN)
6357 {
6358 /* No, try numeric. */
6359 oursig =
6360 target_signal_from_command (parse_and_eval_long (signum_exp));
6361 }
6362 sig_print_info (oursig);
6363 return;
6364 }
6365
6366 printf_filtered ("\n");
6367 /* These ugly casts brought to you by the native VAX compiler. */
6368 for (oursig = TARGET_SIGNAL_FIRST;
6369 (int) oursig < (int) TARGET_SIGNAL_LAST;
6370 oursig = (enum target_signal) ((int) oursig + 1))
6371 {
6372 QUIT;
6373
6374 if (oursig != TARGET_SIGNAL_UNKNOWN
6375 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6376 sig_print_info (oursig);
6377 }
6378
6379 printf_filtered (_("\nUse the \"handle\" command "
6380 "to change these tables.\n"));
6381 }
6382
6383 /* The $_siginfo convenience variable is a bit special. We don't know
6384 for sure the type of the value until we actually have a chance to
6385 fetch the data. The type can change depending on gdbarch, so it is
6386 also dependent on which thread you have selected. This works by:
6387
6388 1. making $_siginfo be an internalvar that creates a new value on
6389 access.
6390
6391 2. making the value of $_siginfo be an lval_computed value. */
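/* For example, on a Linux target "print $_siginfo.si_signo" builds the
value through siginfo_make_value below and reads the underlying bytes
through siginfo_value_read. */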
6392
6393 /* This function implements the lval_computed support for reading a
6394 $_siginfo value. */
6395
6396 static void
6397 siginfo_value_read (struct value *v)
6398 {
6399 LONGEST transferred;
6400
6401 transferred =
6402 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6403 NULL,
6404 value_contents_all_raw (v),
6405 value_offset (v),
6406 TYPE_LENGTH (value_type (v)));
6407
6408 if (transferred != TYPE_LENGTH (value_type (v)))
6409 error (_("Unable to read siginfo"));
6410 }
6411
6412 /* This function implements the lval_computed support for writing a
6413 $_siginfo value. */
6414
6415 static void
6416 siginfo_value_write (struct value *v, struct value *fromval)
6417 {
6418 LONGEST transferred;
6419
6420 transferred = target_write (&current_target,
6421 TARGET_OBJECT_SIGNAL_INFO,
6422 NULL,
6423 value_contents_all_raw (fromval),
6424 value_offset (v),
6425 TYPE_LENGTH (value_type (fromval)));
6426
6427 if (transferred != TYPE_LENGTH (value_type (fromval)))
6428 error (_("Unable to write siginfo"));
6429 }
6430
6431 static const struct lval_funcs siginfo_value_funcs =
6432 {
6433 siginfo_value_read,
6434 siginfo_value_write
6435 };
6436
6437 /* Return a new value with the correct type for the siginfo object of
6438 the current thread using architecture GDBARCH. Return a void value
6439 if there's no object available. */
6440
6441 static struct value *
6442 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6443 {
6444 if (target_has_stack
6445 && !ptid_equal (inferior_ptid, null_ptid)
6446 && gdbarch_get_siginfo_type_p (gdbarch))
6447 {
6448 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6449
6450 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6451 }
6452
6453 return allocate_value (builtin_type (gdbarch)->builtin_void);
6454 }
6455
6456 \f
6457 /* infcall_suspend_state contains state about the program itself like its
6458 registers and any signal it received when it last stopped.
6459 This state must be restored regardless of how the inferior function call
6460 ends (either successfully, or after it hits a breakpoint or signal)
6461 if the program is to properly continue where it left off. */
6462
6463 struct infcall_suspend_state
6464 {
6465 struct thread_suspend_state thread_suspend;
6466 struct inferior_suspend_state inferior_suspend;
6467
6468 /* Other fields: */
6469 CORE_ADDR stop_pc;
6470 struct regcache *registers;
6471
6472 /* Format of SIGINFO_DATA or NULL if it is not present. */
6473 struct gdbarch *siginfo_gdbarch;
6474
6475 /* The format of SIGINFO_DATA depends on SIGINFO_GDBARCH and its length
6476 is TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch
6477 the content would be invalid. */
6478 gdb_byte *siginfo_data;
6479 };
6480
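/* Save the program's state (stop signal, siginfo if available, stop PC
and the registers) so that it can be restored after an inferior
function call. The result must eventually be passed to
restore_infcall_suspend_state or discard_infcall_suspend_state. */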
6481 struct infcall_suspend_state *
6482 save_infcall_suspend_state (void)
6483 {
6484 struct infcall_suspend_state *inf_state;
6485 struct thread_info *tp = inferior_thread ();
6486 struct inferior *inf = current_inferior ();
6487 struct regcache *regcache = get_current_regcache ();
6488 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6489 gdb_byte *siginfo_data = NULL;
6490
6491 if (gdbarch_get_siginfo_type_p (gdbarch))
6492 {
6493 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6494 size_t len = TYPE_LENGTH (type);
6495 struct cleanup *back_to;
6496
6497 siginfo_data = xmalloc (len);
6498 back_to = make_cleanup (xfree, siginfo_data);
6499
6500 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6501 siginfo_data, 0, len) == len)
6502 discard_cleanups (back_to);
6503 else
6504 {
6505 /* Errors ignored. */
6506 do_cleanups (back_to);
6507 siginfo_data = NULL;
6508 }
6509 }
6510
6511 inf_state = XZALLOC (struct infcall_suspend_state);
6512
6513 if (siginfo_data)
6514 {
6515 inf_state->siginfo_gdbarch = gdbarch;
6516 inf_state->siginfo_data = siginfo_data;
6517 }
6518
6519 inf_state->thread_suspend = tp->suspend;
6520 inf_state->inferior_suspend = inf->suspend;
6521
6522 /* run_inferior_call will not use the signal due to its `proceed' call with
6523 TARGET_SIGNAL_0 anyway. */
6524 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6525
6526 inf_state->stop_pc = stop_pc;
6527
6528 inf_state->registers = regcache_dup (regcache);
6529
6530 return inf_state;
6531 }
6532
6533 /* Restore inferior session state to INF_STATE. */
6534
6535 void
6536 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6537 {
6538 struct thread_info *tp = inferior_thread ();
6539 struct inferior *inf = current_inferior ();
6540 struct regcache *regcache = get_current_regcache ();
6541 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6542
6543 tp->suspend = inf_state->thread_suspend;
6544 inf->suspend = inf_state->inferior_suspend;
6545
6546 stop_pc = inf_state->stop_pc;
6547
6548 if (inf_state->siginfo_gdbarch == gdbarch)
6549 {
6550 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6551 size_t len = TYPE_LENGTH (type);
6552
6553 /* Errors ignored. */
6554 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6555 inf_state->siginfo_data, 0, len);
6556 }
6557
6558 /* The inferior can be gone if the user types "print exit(0)"
6559 (and perhaps other times). */
6560 if (target_has_execution)
6561 /* NB: The register write goes through to the target. */
6562 regcache_cpy (regcache, inf_state->registers);
6563
6564 discard_infcall_suspend_state (inf_state);
6565 }
6566
6567 static void
6568 do_restore_infcall_suspend_state_cleanup (void *state)
6569 {
6570 restore_infcall_suspend_state (state);
6571 }
6572
6573 struct cleanup *
6574 make_cleanup_restore_infcall_suspend_state
6575 (struct infcall_suspend_state *inf_state)
6576 {
6577 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6578 }
6579
6580 void
6581 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6582 {
6583 regcache_xfree (inf_state->registers);
6584 xfree (inf_state->siginfo_data);
6585 xfree (inf_state);
6586 }
6587
6588 struct regcache *
6589 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6590 {
6591 return inf_state->registers;
6592 }
6593
6594 /* infcall_control_state contains state regarding gdb's control of the
6595 inferior itself like stepping control. It also contains session state like
6596 the user's currently selected frame. */
6597
6598 struct infcall_control_state
6599 {
6600 struct thread_control_state thread_control;
6601 struct inferior_control_state inferior_control;
6602
6603 /* Other fields: */
6604 enum stop_stack_kind stop_stack_dummy;
6605 int stopped_by_random_signal;
6606 int stop_after_trap;
6607
6608 /* ID of the selected frame when the inferior function call was made. */
6609 struct frame_id selected_frame_id;
6610 };
6611
6612 /* Save all of the information associated with the inferior<==>gdb
6613 connection. */
6614
6615 struct infcall_control_state *
6616 save_infcall_control_state (void)
6617 {
6618 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6619 struct thread_info *tp = inferior_thread ();
6620 struct inferior *inf = current_inferior ();
6621
6622 inf_status->thread_control = tp->control;
6623 inf_status->inferior_control = inf->control;
6624
6625 tp->control.step_resume_breakpoint = NULL;
6626 tp->control.exception_resume_breakpoint = NULL;
6627
6628 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6629 chain. If caller's caller is walking the chain, they'll be happier if we
6630 hand them back the original chain when restore_infcall_control_state is
6631 called. */
6632 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6633
6634 /* Other fields: */
6635 inf_status->stop_stack_dummy = stop_stack_dummy;
6636 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6637 inf_status->stop_after_trap = stop_after_trap;
6638
6639 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6640
6641 return inf_status;
6642 }
6643
6644 static int
6645 restore_selected_frame (void *args)
6646 {
6647 struct frame_id *fid = (struct frame_id *) args;
6648 struct frame_info *frame;
6649
6650 frame = frame_find_by_id (*fid);
6651
6652 /* If frame_find_by_id () fails, the saved frame id no longer
6653 identifies an existing frame, so there is nothing to restore. */
6654 if (frame == NULL)
6655 {
6656 warning (_("Unable to restore previously selected frame."));
6657 return 0;
6658 }
6659
6660 select_frame (frame);
6661
6662 return (1);
6663 }
6664
6665 /* Restore inferior session state to INF_STATUS. */
6666
6667 void
6668 restore_infcall_control_state (struct infcall_control_state *inf_status)
6669 {
6670 struct thread_info *tp = inferior_thread ();
6671 struct inferior *inf = current_inferior ();
6672
6673 if (tp->control.step_resume_breakpoint)
6674 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6675
6676 if (tp->control.exception_resume_breakpoint)
6677 tp->control.exception_resume_breakpoint->disposition
6678 = disp_del_at_next_stop;
6679
6680 /* Handle the bpstat_copy of the chain. */
6681 bpstat_clear (&tp->control.stop_bpstat);
6682
6683 tp->control = inf_status->thread_control;
6684 inf->control = inf_status->inferior_control;
6685
6686 /* Other fields: */
6687 stop_stack_dummy = inf_status->stop_stack_dummy;
6688 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6689 stop_after_trap = inf_status->stop_after_trap;
6690
6691 if (target_has_stack)
6692 {
6693 /* The point of catch_errors is that if the stack is clobbered,
6694 walking the stack might encounter a garbage pointer and
6695 error() trying to dereference it. */
6696 if (catch_errors
6697 (restore_selected_frame, &inf_status->selected_frame_id,
6698 "Unable to restore previously selected frame:\n",
6699 RETURN_MASK_ERROR) == 0)
6700 /* Error in restoring the selected frame. Select the innermost
6701 frame. */
6702 select_frame (get_current_frame ());
6703 }
6704
6705 xfree (inf_status);
6706 }
6707
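/* Cleanup wrapper: forward STS to restore_infcall_control_state so
   that it can be registered with make_cleanup.  */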
6708 static void
6709 do_restore_infcall_control_state_cleanup (void *sts)
6710 {
6711 restore_infcall_control_state (sts);
6712 }
6713
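/* Return a cleanup that restores (and then frees) INF_STATUS when run.  */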
6714 struct cleanup *
6715 make_cleanup_restore_infcall_control_state
6716 (struct infcall_control_state *inf_status)
6717 {
6718 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6719 }
6720
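/* Free INF_STATUS without restoring the control state it holds; any
   saved step-resume and exception-resume breakpoints are marked for
   deletion at the next stop.  */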
6721 void
6722 discard_infcall_control_state (struct infcall_control_state *inf_status)
6723 {
6724 if (inf_status->thread_control.step_resume_breakpoint)
6725 inf_status->thread_control.step_resume_breakpoint->disposition
6726 = disp_del_at_next_stop;
6727
6728 if (inf_status->thread_control.exception_resume_breakpoint)
6729 inf_status->thread_control.exception_resume_breakpoint->disposition
6730 = disp_del_at_next_stop;
6731
6732 /* See save_infcall_control_state for info on stop_bpstat. */
6733 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6734
6735 xfree (inf_status);
6736 }
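
/* The control-state functions above are used just like the
   suspend-state functions (illustrative sketch only; real callers
   live elsewhere, e.g. in the inferior call code):

     struct infcall_control_state *status = save_infcall_control_state ();
     struct cleanup *old_chain
       = make_cleanup_restore_infcall_control_state (status);

     ... make the inferior call ...

   Running the cleanup (do_cleanups) restores stepping control, the
   bpstat chain and the selected frame.  To keep the post-call control
   state instead, call discard_cleanups (old_chain) followed by
   discard_infcall_control_state (status).  */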
6737 \f
6738 int
6739 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6740 {
6741 struct target_waitstatus last;
6742 ptid_t last_ptid;
6743
6744 get_last_target_status (&last_ptid, &last);
6745
6746 if (last.kind != TARGET_WAITKIND_FORKED)
6747 return 0;
6748
6749 if (!ptid_equal (last_ptid, pid))
6750 return 0;
6751
6752 *child_pid = last.value.related_pid;
6753 return 1;
6754 }
6755
6756 int
6757 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6758 {
6759 struct target_waitstatus last;
6760 ptid_t last_ptid;
6761
6762 get_last_target_status (&last_ptid, &last);
6763
6764 if (last.kind != TARGET_WAITKIND_VFORKED)
6765 return 0;
6766
6767 if (!ptid_equal (last_ptid, pid))
6768 return 0;
6769
6770 *child_pid = last.value.related_pid;
6771 return 1;
6772 }
6773
6774 int
6775 inferior_has_execd (ptid_t pid, char **execd_pathname)
6776 {
6777 struct target_waitstatus last;
6778 ptid_t last_ptid;
6779
6780 get_last_target_status (&last_ptid, &last);
6781
6782 if (last.kind != TARGET_WAITKIND_EXECD)
6783 return 0;
6784
6785 if (!ptid_equal (last_ptid, pid))
6786 return 0;
6787
6788 *execd_pathname = xstrdup (last.value.execd_pathname);
6789 return 1;
6790 }
6791
6792 int
6793 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6794 {
6795 struct target_waitstatus last;
6796 ptid_t last_ptid;
6797
6798 get_last_target_status (&last_ptid, &last);
6799
6800 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6801 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6802 return 0;
6803
6804 if (!ptid_equal (last_ptid, pid))
6805 return 0;
6806
6807 *syscall_number = last.value.syscall_number;
6808 return 1;
6809 }
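
/* Illustrative example (not an actual caller in this file): after a
   stop, code interested in fork events could ask

     ptid_t child;

     if (inferior_has_forked (inferior_ptid, &child))
       printf_filtered (_("fork child is process %d\n"),
                        ptid_get_pid (child));

   and likewise for the vfork, exec and syscall predicates above.  */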
6810
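/* Return non-zero if PTID matches FILTER: a minus_one_ptid filter
   matches everything, a process-only filter matches any thread of
   that process, and any other filter matches only an equal ptid.  */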
6811 int
6812 ptid_match (ptid_t ptid, ptid_t filter)
6813 {
6814 if (ptid_equal (filter, minus_one_ptid))
6815 return 1;
6816 if (ptid_is_pid (filter)
6817 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6818 return 1;
6819 else if (ptid_equal (ptid, filter))
6820 return 1;
6821
6822 return 0;
6823 }
6824
6825 /* restore_inferior_ptid() will be used by the cleanup machinery
6826 to restore the inferior_ptid value saved in a call to
6827 save_inferior_ptid(). */
6828
6829 static void
6830 restore_inferior_ptid (void *arg)
6831 {
6832 ptid_t *saved_ptid_ptr = arg;
6833
6834 inferior_ptid = *saved_ptid_ptr;
6835 xfree (arg);
6836 }
6837
6838 /* Save the value of inferior_ptid so that it may be restored by a
6839 later call to do_cleanups(). Returns the struct cleanup pointer
6840 needed for later doing the cleanup. */
6841
6842 struct cleanup *
6843 save_inferior_ptid (void)
6844 {
6845 ptid_t *saved_ptid_ptr;
6846
6847 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6848 *saved_ptid_ptr = inferior_ptid;
6849 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6850 }
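
/* Illustrative example (SOME_OTHER_PTID is hypothetical): temporarily
   switching the current thread looks like

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... work in the context of that thread ...
     do_cleanups (old_chain);

   which restores the original inferior_ptid and frees the saved copy.  */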
6851 \f
6852
6853 /* User interface for reverse debugging:
6854 Set exec-direction / show exec-direction commands
6855 (returns an error unless the target supports reverse execution). */
6856
6857 int execution_direction = EXEC_FORWARD;
6858 static const char exec_forward[] = "forward";
6859 static const char exec_reverse[] = "reverse";
6860 static const char *exec_direction = exec_forward;
6861 static const char *exec_direction_names[] = {
6862 exec_forward,
6863 exec_reverse,
6864 NULL
6865 };
6866
6867 static void
6868 set_exec_direction_func (char *args, int from_tty,
6869 struct cmd_list_element *cmd)
6870 {
6871 if (target_can_execute_reverse)
6872 {
6873 if (!strcmp (exec_direction, exec_forward))
6874 execution_direction = EXEC_FORWARD;
6875 else if (!strcmp (exec_direction, exec_reverse))
6876 execution_direction = EXEC_REVERSE;
6877 }
6878 else
6879 {
6880 exec_direction = exec_forward;
6881 error (_("Target does not support this operation."));
6882 }
6883 }
6884
6885 static void
6886 show_exec_direction_func (struct ui_file *out, int from_tty,
6887 struct cmd_list_element *cmd, const char *value)
6888 {
6889 switch (execution_direction)
  {
6890 case EXEC_FORWARD:
6891 fprintf_filtered (out, _("Forward.\n"));
6892 break;
6893 case EXEC_REVERSE:
6894 fprintf_filtered (out, _("Reverse.\n"));
6895 break;
6896 default:
6897 internal_error (__FILE__, __LINE__,
6898 _("bogus execution_direction value: %d"),
6899 (int) execution_direction);
6900 }
6901 }
6902
6903 /* User interface for non-stop mode. */
6904
6905 int non_stop = 0;
6906
6907 static void
6908 set_non_stop (char *args, int from_tty,
6909 struct cmd_list_element *c)
6910 {
6911 if (target_has_execution)
6912 {
6913 non_stop_1 = non_stop;
6914 error (_("Cannot change this setting while the inferior is running."));
6915 }
6916
6917 non_stop = non_stop_1;
6918 }
6919
6920 static void
6921 show_non_stop (struct ui_file *file, int from_tty,
6922 struct cmd_list_element *c, const char *value)
6923 {
6924 fprintf_filtered (file,
6925 _("Controlling the inferior in non-stop mode is %s.\n"),
6926 value);
6927 }
6928
6929 static void
6930 show_schedule_multiple (struct ui_file *file, int from_tty,
6931 struct cmd_list_element *c, const char *value)
6932 {
6933 fprintf_filtered (file, _("Resuming the execution of threads "
6934 "of all processes is %s.\n"), value);
6935 }
6936
6937 void
6938 _initialize_infrun (void)
6939 {
6940 int i;
6941 int numsigs;
6942
6943 add_info ("signals", signals_info, _("\
6944 What the debugger does when the program receives various signals.\n\
6945 Specify a signal as argument to print info on that signal only."));
6946 add_info_alias ("handle", "signals", 0);
6947
6948 add_com ("handle", class_run, handle_command, _("\
6949 Specify how to handle a signal.\n\
6950 Args are signals and actions to apply to those signals.\n\
6951 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6952 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6953 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6954 The special arg \"all\" is recognized to mean all signals except those\n\
6955 used by the debugger, typically SIGTRAP and SIGINT.\n\
6956 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6957 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6958 Stop means reenter debugger if this signal happens (implies print).\n\
6959 Print means print a message if this signal happens.\n\
6960 Pass means let program see this signal; otherwise program doesn't know.\n\
6961 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6962 Pass and Stop may be combined."));
6963 if (xdb_commands)
6964 {
6965 add_com ("lz", class_info, signals_info, _("\
6966 What the debugger does when the program receives various signals.\n\
6967 Specify a signal as argument to print info on that signal only."));
6968 add_com ("z", class_run, xdb_handle_command, _("\
6969 Specify how to handle a signal.\n\
6970 Args are signals and actions to apply to those signals.\n\
6971 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6972 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6973 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6974 The special arg \"all\" is recognized to mean all signals except those\n\
6975 used by the debugger, typically SIGTRAP and SIGINT.\n\
6976 Recognized actions include \"s\" (toggles between stop and nostop),\n\
6977 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6978 nopass), \"Q\" (noprint)\n\
6979 Stop means reenter debugger if this signal happens (implies print).\n\
6980 Print means print a message if this signal happens.\n\
6981 Pass means let program see this signal; otherwise program doesn't know.\n\
6982 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6983 Pass and Stop may be combined."));
6984 }
6985
6986 if (!dbx_commands)
6987 stop_command = add_cmd ("stop", class_obscure,
6988 not_just_help_class_command, _("\
6989 There is no `stop' command, but you can set a hook on `stop'.\n\
6990 This allows you to set a list of commands to be run each time execution\n\
6991 of the program stops."), &cmdlist);
6992
6993 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6994 Set inferior debugging."), _("\
6995 Show inferior debugging."), _("\
6996 When non-zero, inferior specific debugging is enabled."),
6997 NULL,
6998 show_debug_infrun,
6999 &setdebuglist, &showdebuglist);
7000
7001 add_setshow_boolean_cmd ("displaced", class_maintenance,
7002 &debug_displaced, _("\
7003 Set displaced stepping debugging."), _("\
7004 Show displaced stepping debugging."), _("\
7005 When non-zero, displaced stepping specific debugging is enabled."),
7006 NULL,
7007 show_debug_displaced,
7008 &setdebuglist, &showdebuglist);
7009
7010 add_setshow_boolean_cmd ("non-stop", no_class,
7011 &non_stop_1, _("\
7012 Set whether gdb controls the inferior in non-stop mode."), _("\
7013 Show whether gdb controls the inferior in non-stop mode."), _("\
7014 When debugging a multi-threaded program and this setting is\n\
7015 off (the default, also called all-stop mode), when one thread stops\n\
7016 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7017 all other threads in the program while you interact with the thread of\n\
7018 interest. When you continue or step a thread, you can allow the other\n\
7019 threads to run, or have them remain stopped, but while you inspect any\n\
7020 thread's state, all threads stop.\n\
7021 \n\
7022 In non-stop mode, when one thread stops, other threads can continue\n\
7023 to run freely. You'll be able to step each thread independently,\n\
7024 leave it stopped or free to run as needed."),
7025 set_non_stop,
7026 show_non_stop,
7027 &setlist,
7028 &showlist);
7029
7030 numsigs = (int) TARGET_SIGNAL_LAST;
7031 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7032 signal_print = (unsigned char *)
7033 xmalloc (sizeof (signal_print[0]) * numsigs);
7034 signal_program = (unsigned char *)
7035 xmalloc (sizeof (signal_program[0]) * numsigs);
7036 signal_pass = (unsigned char *)
7037 xmalloc (sizeof (signal_pass[0]) * numsigs);
7038 for (i = 0; i < numsigs; i++)
7039 {
7040 signal_stop[i] = 1;
7041 signal_print[i] = 1;
7042 signal_program[i] = 1;
7043 }
7044
7045 /* Signals caused by debugger's own actions
7046 should not be given to the program afterwards. */
7047 signal_program[TARGET_SIGNAL_TRAP] = 0;
7048 signal_program[TARGET_SIGNAL_INT] = 0;
7049
7050 /* Signals that are not errors should not normally enter the debugger. */
7051 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7052 signal_print[TARGET_SIGNAL_ALRM] = 0;
7053 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7054 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7055 signal_stop[TARGET_SIGNAL_PROF] = 0;
7056 signal_print[TARGET_SIGNAL_PROF] = 0;
7057 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7058 signal_print[TARGET_SIGNAL_CHLD] = 0;
7059 signal_stop[TARGET_SIGNAL_IO] = 0;
7060 signal_print[TARGET_SIGNAL_IO] = 0;
7061 signal_stop[TARGET_SIGNAL_POLL] = 0;
7062 signal_print[TARGET_SIGNAL_POLL] = 0;
7063 signal_stop[TARGET_SIGNAL_URG] = 0;
7064 signal_print[TARGET_SIGNAL_URG] = 0;
7065 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7066 signal_print[TARGET_SIGNAL_WINCH] = 0;
7067 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7068 signal_print[TARGET_SIGNAL_PRIO] = 0;
7069
7070 /* These signals are used internally by user-level thread
7071 implementations. (See signal(5) on Solaris.) Like the above
7072 signals, a healthy program receives and handles them as part of
7073 its normal operation. */
7074 signal_stop[TARGET_SIGNAL_LWP] = 0;
7075 signal_print[TARGET_SIGNAL_LWP] = 0;
7076 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7077 signal_print[TARGET_SIGNAL_WAITING] = 0;
7078 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7079 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7080
7081 /* Update cached state. */
7082 signal_cache_update (-1);
7083
7084 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7085 &stop_on_solib_events, _("\
7086 Set stopping for shared library events."), _("\
7087 Show stopping for shared library events."), _("\
7088 If nonzero, gdb will give control to the user when the dynamic linker\n\
7089 notifies gdb of shared library events. The most common event of interest\n\
7090 to the user would be loading/unloading of a new library."),
7091 NULL,
7092 show_stop_on_solib_events,
7093 &setlist, &showlist);
7094
7095 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7096 follow_fork_mode_kind_names,
7097 &follow_fork_mode_string, _("\
7098 Set debugger response to a program call of fork or vfork."), _("\
7099 Show debugger response to a program call of fork or vfork."), _("\
7100 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7101 parent - the original process is debugged after a fork\n\
7102 child - the new process is debugged after a fork\n\
7103 The unfollowed process will continue to run.\n\
7104 By default, the debugger will follow the parent process."),
7105 NULL,
7106 show_follow_fork_mode_string,
7107 &setlist, &showlist);
7108
7109 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7110 follow_exec_mode_names,
7111 &follow_exec_mode_string, _("\
7112 Set debugger response to a program call of exec."), _("\
7113 Show debugger response to a program call of exec."), _("\
7114 An exec call replaces the program image of a process.\n\
7115 \n\
7116 follow-exec-mode can be:\n\
7117 \n\
7118 new - the debugger creates a new inferior and rebinds the process\n\
7119 to this new inferior. The program the process was running before\n\
7120 the exec call can be restarted afterwards by restarting the original\n\
7121 inferior.\n\
7122 \n\
7123 same - the debugger keeps the process bound to the same inferior.\n\
7124 The new executable image replaces the previous executable loaded in\n\
7125 the inferior. Restarting the inferior after the exec call restarts\n\
7126 the executable the process was running after the exec call.\n\
7127 \n\
7128 By default, the debugger will use the same inferior."),
7129 NULL,
7130 show_follow_exec_mode_string,
7131 &setlist, &showlist);
7132
7133 add_setshow_enum_cmd ("scheduler-locking", class_run,
7134 scheduler_enums, &scheduler_mode, _("\
7135 Set mode for locking scheduler during execution."), _("\
7136 Show mode for locking scheduler during execution."), _("\
7137 off == no locking (threads may preempt at any time)\n\
7138 on == full locking (no thread except the current thread may run)\n\
7139 step == scheduler locked during every single-step operation.\n\
7140 In this mode, no other thread may run during a step command.\n\
7141 Other threads may run while stepping over a function call ('next')."),
7142 set_schedlock_func, /* traps on target vector */
7143 show_scheduler_mode,
7144 &setlist, &showlist);
7145
7146 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7147 Set mode for resuming threads of all processes."), _("\
7148 Show mode for resuming threads of all processes."), _("\
7149 When on, execution commands (such as 'continue' or 'next') resume all\n\
7150 threads of all processes. When off (which is the default), execution\n\
7151 commands only resume the threads of the current process. The set of\n\
7152 threads that are resumed is further refined by the scheduler-locking\n\
7153 mode (see help set scheduler-locking)."),
7154 NULL,
7155 show_schedule_multiple,
7156 &setlist, &showlist);
7157
7158 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7159 Set mode of the step operation."), _("\
7160 Show mode of the step operation."), _("\
7161 When set, doing a step over a function without debug line information\n\
7162 will stop at the first instruction of that function. Otherwise, the\n\
7163 function is skipped and the step command stops at a different source line."),
7164 NULL,
7165 show_step_stop_if_no_debug,
7166 &setlist, &showlist);
7167
7168 add_setshow_enum_cmd ("displaced-stepping", class_run,
7169 can_use_displaced_stepping_enum,
7170 &can_use_displaced_stepping, _("\
7171 Set debugger's willingness to use displaced stepping."), _("\
7172 Show debugger's willingness to use displaced stepping."), _("\
7173 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7174 supported by the target architecture. If off, gdb will not use displaced\n\
7175 stepping to step over breakpoints, even if such is supported by the target\n\
7176 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7177 if the target architecture supports it and non-stop mode is active, but will not\n\
7178 use it in all-stop mode (see help set non-stop)."),
7179 NULL,
7180 show_can_use_displaced_stepping,
7181 &setlist, &showlist);
7182
7183 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7184 &exec_direction, _("Set direction of execution.\n\
7185 Options are 'forward' or 'reverse'."),
7186 _("Show direction of execution (forward/reverse)."),
7187 _("Tells gdb whether to execute forward or backward."),
7188 set_exec_direction_func, show_exec_direction_func,
7189 &setlist, &showlist);
7190
7191 /* Set/show detach-on-fork: user-settable mode. */
7192
7193 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7194 Set whether gdb will detach the child of a fork."), _("\
7195 Show whether gdb will detach the child of a fork."), _("\
7196 Tells gdb whether to detach the child of a fork."),
7197 NULL, NULL, &setlist, &showlist);
7198
7199 /* ptid initializations */
7200 inferior_ptid = null_ptid;
7201 target_last_wait_ptid = minus_one_ptid;
7202
7203 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7204 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7205 observer_attach_thread_exit (infrun_thread_thread_exit);
7206 observer_attach_inferior_exit (infrun_inferior_exit);
7207
7208 /* Explicitly create without lookup, since that tries to create a
7209 value with a void typed value, and when we get here, gdbarch
7210 isn't initialized yet. At this point, we're quite sure there
7211 isn't another convenience variable of the same name. */
7212 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7213
7214 add_setshow_boolean_cmd ("observer", no_class,
7215 &observer_mode_1, _("\
7216 Set whether gdb controls the inferior in observer mode."), _("\
7217 Show whether gdb controls the inferior in observer mode."), _("\
7218 In observer mode, GDB can get data from the inferior, but not\n\
7219 affect its execution. Registers and memory may not be changed,\n\
7220 breakpoints may not be set, and the program cannot be interrupted\n\
7221 or signalled."),
7222 set_observer_mode,
7223 show_observer_mode,
7224 &setlist,
7225 &showlist);
7226 }