1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57
58 /* Prototypes for local functions */
59
60 static void signals_info (char *, int);
61
62 static void handle_command (char *, int);
63
64 static void sig_print_info (enum target_signal);
65
66 static void sig_print_header (void);
67
68 static void resume_cleanups (void *);
69
70 static int hook_stop_stub (void *);
71
72 static int restore_selected_frame (void *);
73
74 static int follow_fork (void);
75
76 static void set_schedlock_func (char *args, int from_tty,
77 struct cmd_list_element *c);
78
79 static int currently_stepping (struct thread_info *tp);
80
81 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
82 void *data);
83
84 static void xdb_handle_command (char *args, int from_tty);
85
86 static int prepare_to_proceed (int);
87
88 static void print_exited_reason (int exitstatus);
89
90 static void print_signal_exited_reason (enum target_signal siggnal);
91
92 static void print_no_history_reason (void);
93
94 static void print_signal_received_reason (enum target_signal siggnal);
95
96 static void print_end_stepping_range_reason (void);
97
98 void _initialize_infrun (void);
99
100 void nullify_last_target_wait_ptid (void);
101
102 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
103
104 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
105
106 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
107
108 /* When set, stop the 'step' command if we enter a function which has
109 no line number information. The normal behavior is that we step
110 over such functions. */
111 int step_stop_if_no_debug = 0;
112 static void
113 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
114 struct cmd_list_element *c, const char *value)
115 {
116 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
117 }
118
119 /* In asynchronous mode, but simulating synchronous execution. */
120
121 int sync_execution = 0;
122
123 /* wait_for_inferior and normal_stop use this to notify the user
124 when the inferior stopped in a different thread than it had been
125 running in. */
126
127 static ptid_t previous_inferior_ptid;
128
129 /* Default behavior is to detach newly forked processes (legacy). */
130 int detach_fork = 1;
131
132 int debug_displaced = 0;
133 static void
134 show_debug_displaced (struct ui_file *file, int from_tty,
135 struct cmd_list_element *c, const char *value)
136 {
137 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
138 }
139
140 int debug_infrun = 0;
141 static void
142 show_debug_infrun (struct ui_file *file, int from_tty,
143 struct cmd_list_element *c, const char *value)
144 {
145 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
146 }
147
148 /* If the program uses ELF-style shared libraries, then calls to
149 functions in shared libraries go through stubs, which live in a
150 table called the PLT (Procedure Linkage Table). The first time the
151 function is called, the stub sends control to the dynamic linker,
152 which looks up the function's real address, patches the stub so
153 that future calls will go directly to the function, and then passes
154 control to the function.
155
156 If we are stepping at the source level, we don't want to see any of
157 this --- we just want to skip over the stub and the dynamic linker.
158 The simple approach is to single-step until control leaves the
159 dynamic linker.
160
161 However, on some systems (e.g., Red Hat's 5.2 distribution) the
162 dynamic linker calls functions in the shared C library, so you
163 can't tell from the PC alone whether the dynamic linker is still
164 running. In this case, we use a step-resume breakpoint to get us
165 past the dynamic linker, as if we were using "next" to step over a
166 function call.
167
168 in_solib_dynsym_resolve_code() says whether we're in the dynamic
169 linker code or not. Normally, this means we single-step. However,
170 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
171 address where we can place a step-resume breakpoint to get past the
172 linker's symbol resolution function.
173
174 in_solib_dynsym_resolve_code() can generally be implemented in a
175 pretty portable way, by comparing the PC against the address ranges
176 of the dynamic linker's sections.
177
178 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
179 it depends on internal details of the dynamic linker. It's usually
180 not too hard to figure out where to put a breakpoint, but it
181 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
182 sanity checking. If it can't figure things out, returning zero and
183 getting the (possibly confusing) stepping behavior is better than
184 signalling an error, which will obscure the change in the
185 inferior's state. */
186
187 /* This function returns TRUE if pc is the address of an instruction
188 that lies within the dynamic linker (such as the event hook, or the
189 dld itself).
190
191 This function must be used only when a dynamic linker event has
192 been caught, and the inferior is being stepped out of the hook, or
193 undefined results are guaranteed. */
194
195 #ifndef SOLIB_IN_DYNAMIC_LINKER
196 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
197 #endif
198
199 /* "Observer mode" is somewhat like a more extreme version of
200 non-stop, in which all GDB operations that might affect the
201 target's execution have been disabled. */
202
203 static int non_stop_1 = 0;
204
205 int observer_mode = 0;
206 static int observer_mode_1 = 0;
207
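/* Called when the "set observer" setting changes: refuse to change it
   while the inferior is running, then propagate the new value to the
   individual may-* permission flags; turning observer mode on also
   forces async execution and non-stop mode.  */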
208 static void
209 set_observer_mode (char *args, int from_tty,
210 struct cmd_list_element *c)
211 {
212 extern int pagination_enabled;
213
214 if (target_has_execution)
215 {
216 observer_mode_1 = observer_mode;
217 error (_("Cannot change this setting while the inferior is running."));
218 }
219
220 observer_mode = observer_mode_1;
221
222 may_write_registers = !observer_mode;
223 may_write_memory = !observer_mode;
224 may_insert_breakpoints = !observer_mode;
225 may_insert_tracepoints = !observer_mode;
226 /* We can insert fast tracepoints in or out of observer mode,
227 but enable them if we're going into this mode. */
228 if (observer_mode)
229 may_insert_fast_tracepoints = 1;
230 may_stop = !observer_mode;
231 update_target_permissions ();
232
233 /* Going *into* observer mode we must force non-stop, then
234 going out we leave it that way. */
235 if (observer_mode)
236 {
237 target_async_permitted = 1;
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that the target may silently handle.
286 This is automatically determined from the flags above,
287 and simply cached here. */
288 static unsigned char *signal_pass;
289
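/* For each of the first NSIGS signals whose entry in SIGS is nonzero,
   SET_SIGS sets the corresponding entry in FLAGS, and UNSET_SIGS clears
   it.  */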
290 #define SET_SIGS(nsigs,sigs,flags) \
291 do { \
292 int signum = (nsigs); \
293 while (signum-- > 0) \
294 if ((sigs)[signum]) \
295 (flags)[signum] = 1; \
296 } while (0)
297
298 #define UNSET_SIGS(nsigs,sigs,flags) \
299 do { \
300 int signum = (nsigs); \
301 while (signum-- > 0) \
302 if ((sigs)[signum]) \
303 (flags)[signum] = 0; \
304 } while (0)
305
306 /* Value to pass to target_resume() to cause all threads to resume. */
307
308 #define RESUME_ALL minus_one_ptid
309
310 /* Command list pointer for the "stop" placeholder. */
311
312 static struct cmd_list_element *stop_command;
313
314 /* Function inferior was in as of last step command. */
315
316 static struct symbol *step_start_function;
317
318 /* Nonzero if we want to give control to the user when we're notified
319 of shared library events by the dynamic linker. */
320 int stop_on_solib_events;
321 static void
322 show_stop_on_solib_events (struct ui_file *file, int from_tty,
323 struct cmd_list_element *c, const char *value)
324 {
325 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
326 value);
327 }
328
329 /* Nonzero means we are expecting a trace trap
330 and should stop the inferior and return silently when it happens. */
331
332 int stop_after_trap;
333
334 /* Save register contents here when executing a "finish" command or when
335 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
336 Thus this contains the return value from the called function (assuming
337 values are returned in a register). */
338
339 struct regcache *stop_registers;
340
341 /* Nonzero after stop if current stack frame should be printed. */
342
343 static int stop_print_frame;
344
345 /* This is a cached copy of the pid/waitstatus of the last event
346 returned by target_wait()/deprecated_target_wait_hook(). This
347 information is returned by get_last_target_status(). */
348 static ptid_t target_last_wait_ptid;
349 static struct target_waitstatus target_last_waitstatus;
350
351 static void context_switch (ptid_t ptid);
352
353 void init_thread_stepping_state (struct thread_info *tss);
354
355 void init_infwait_state (void);
356
357 static const char follow_fork_mode_child[] = "child";
358 static const char follow_fork_mode_parent[] = "parent";
359
360 static const char *follow_fork_mode_kind_names[] = {
361 follow_fork_mode_child,
362 follow_fork_mode_parent,
363 NULL
364 };
365
366 static const char *follow_fork_mode_string = follow_fork_mode_parent;
367 static void
368 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
369 struct cmd_list_element *c, const char *value)
370 {
371 fprintf_filtered (file,
372 _("Debugger response to a program "
373 "call of fork or vfork is \"%s\".\n"),
374 value);
375 }
376 \f
377
378 /* Tell the target to follow the fork we're stopped at. Returns true
379 if the inferior should be resumed; false, if the target for some
380 reason decided it's best not to resume. */
381
382 static int
383 follow_fork (void)
384 {
385 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
386 int should_resume = 1;
387 struct thread_info *tp;
388
389 /* Copy user stepping state to the new inferior thread. FIXME: the
390 followed fork child thread should have a copy of most of the
391 parent thread structure's run control related fields, not just these.
392 Initialized to avoid "may be used uninitialized" warnings from gcc. */
393 struct breakpoint *step_resume_breakpoint = NULL;
394 struct breakpoint *exception_resume_breakpoint = NULL;
395 CORE_ADDR step_range_start = 0;
396 CORE_ADDR step_range_end = 0;
397 struct frame_id step_frame_id = { 0 };
398
399 if (!non_stop)
400 {
401 ptid_t wait_ptid;
402 struct target_waitstatus wait_status;
403
404 /* Get the last target status returned by target_wait(). */
405 get_last_target_status (&wait_ptid, &wait_status);
406
407 /* If not stopped at a fork event, then there's nothing else to
408 do. */
409 if (wait_status.kind != TARGET_WAITKIND_FORKED
410 && wait_status.kind != TARGET_WAITKIND_VFORKED)
411 return 1;
412
413 /* Check if we switched over from WAIT_PTID, since the event was
414 reported. */
415 if (!ptid_equal (wait_ptid, minus_one_ptid)
416 && !ptid_equal (inferior_ptid, wait_ptid))
417 {
418 /* We did. Switch back to WAIT_PTID thread, to tell the
419 target to follow it (in either direction). We'll
420 afterwards refuse to resume, and inform the user what
421 happened. */
422 switch_to_thread (wait_ptid);
423 should_resume = 0;
424 }
425 }
426
427 tp = inferior_thread ();
428
429 /* If there were any forks/vforks that were caught and are now to be
430 followed, then do so now. */
431 switch (tp->pending_follow.kind)
432 {
433 case TARGET_WAITKIND_FORKED:
434 case TARGET_WAITKIND_VFORKED:
435 {
436 ptid_t parent, child;
437
438 /* If the user did a next/step, etc, over a fork call,
439 preserve the stepping state in the fork child. */
440 if (follow_child && should_resume)
441 {
442 step_resume_breakpoint = clone_momentary_breakpoint
443 (tp->control.step_resume_breakpoint);
444 step_range_start = tp->control.step_range_start;
445 step_range_end = tp->control.step_range_end;
446 step_frame_id = tp->control.step_frame_id;
447 exception_resume_breakpoint
448 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
449
450 /* For now, delete the parent's sr breakpoint, otherwise,
451 parent/child sr breakpoints are considered duplicates,
452 and the child version will not be installed. Remove
453 this when the breakpoints module becomes aware of
454 inferiors and address spaces. */
455 delete_step_resume_breakpoint (tp);
456 tp->control.step_range_start = 0;
457 tp->control.step_range_end = 0;
458 tp->control.step_frame_id = null_frame_id;
459 delete_exception_resume_breakpoint (tp);
460 }
461
462 parent = inferior_ptid;
463 child = tp->pending_follow.value.related_pid;
464
465 /* Tell the target to do whatever is necessary to follow
466 either parent or child. */
467 if (target_follow_fork (follow_child))
468 {
469 /* Target refused to follow, or there's some other reason
470 we shouldn't resume. */
471 should_resume = 0;
472 }
473 else
474 {
475 /* This pending follow fork event is now handled, one way
476 or another. The previously selected thread may be gone
477 from the lists by now, but if it is still around, we need
478 to clear the pending follow request. */
479 tp = find_thread_ptid (parent);
480 if (tp)
481 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
482
483 /* This makes sure we don't try to apply the "Switched
484 over from WAIT_PTID" logic above. */
485 nullify_last_target_wait_ptid ();
486
487 /* If we followed the child, switch to it... */
488 if (follow_child)
489 {
490 switch_to_thread (child);
491
492 /* ... and preserve the stepping state, in case the
493 user was stepping over the fork call. */
494 if (should_resume)
495 {
496 tp = inferior_thread ();
497 tp->control.step_resume_breakpoint
498 = step_resume_breakpoint;
499 tp->control.step_range_start = step_range_start;
500 tp->control.step_range_end = step_range_end;
501 tp->control.step_frame_id = step_frame_id;
502 tp->control.exception_resume_breakpoint
503 = exception_resume_breakpoint;
504 }
505 else
506 {
507 /* If we get here, it was because we're trying to
508 resume from a fork catchpoint, but, the user
509 has switched threads away from the thread that
510 forked. In that case, the resume command
511 issued is most likely not applicable to the
512 child, so just warn, and refuse to resume. */
513 warning (_("Not resuming: switched threads "
514 "before following fork child.\n"));
515 }
516
517 /* Reset breakpoints in the child as appropriate. */
518 follow_inferior_reset_breakpoints ();
519 }
520 else
521 switch_to_thread (parent);
522 }
523 }
524 break;
525 case TARGET_WAITKIND_SPURIOUS:
526 /* Nothing to follow. */
527 break;
528 default:
529 internal_error (__FILE__, __LINE__,
530 "Unexpected pending_follow.kind %d\n",
531 tp->pending_follow.kind);
532 break;
533 }
534
535 return should_resume;
536 }
537
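/* Re-wire thread-specific step-resume and exception-resume breakpoints to
   the current thread and re-insert all breakpoints; used after following a
   fork to the child.  */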
538 void
539 follow_inferior_reset_breakpoints (void)
540 {
541 struct thread_info *tp = inferior_thread ();
542
543 /* Was there a step_resume breakpoint? (There was if the user
544 did a "next" at the fork() call.) If so, explicitly reset its
545 thread number.
546
547 step_resumes are a form of bp that are made to be per-thread.
548 Since we created the step_resume bp when the parent process
549 was being debugged, and now are switching to the child process,
550 from the breakpoint package's viewpoint, that's a switch of
551 "threads". We must update the bp's notion of which thread
552 it is for, or it'll be ignored when it triggers. */
553
554 if (tp->control.step_resume_breakpoint)
555 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
556
557 if (tp->control.exception_resume_breakpoint)
558 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
559
560 /* Reinsert all breakpoints in the child. The user may have set
561 breakpoints after catching the fork, in which case those
562 were never set in the child, but only in the parent. This makes
563 sure the inserted breakpoints match the breakpoint list. */
564
565 breakpoint_re_set ();
566 insert_breakpoints ();
567 }
568
569 /* The child has exited or execed: resume threads of the parent the
570 user wanted to be executing. */
571
572 static int
573 proceed_after_vfork_done (struct thread_info *thread,
574 void *arg)
575 {
576 int pid = * (int *) arg;
577
578 if (ptid_get_pid (thread->ptid) == pid
579 && is_running (thread->ptid)
580 && !is_executing (thread->ptid)
581 && !thread->stop_requested
582 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
583 {
584 if (debug_infrun)
585 fprintf_unfiltered (gdb_stdlog,
586 "infrun: resuming vfork parent thread %s\n",
587 target_pid_to_str (thread->ptid));
588
589 switch_to_thread (thread->ptid);
590 clear_proceed_status ();
591 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
592 }
593
594 return 0;
595 }
596
597 /* Called whenever we notice an exec or exit event, to handle
598 detaching or resuming a vfork parent. */
599
600 static void
601 handle_vfork_child_exec_or_exit (int exec)
602 {
603 struct inferior *inf = current_inferior ();
604
605 if (inf->vfork_parent)
606 {
607 int resume_parent = -1;
608
609 /* This exec or exit marks the end of the shared memory region
610 between the parent and the child. If the user wanted to
611 detach from the parent, now is the time. */
612
613 if (inf->vfork_parent->pending_detach)
614 {
615 struct thread_info *tp;
616 struct cleanup *old_chain;
617 struct program_space *pspace;
618 struct address_space *aspace;
619
620 /* follow-fork child, detach-on-fork on. */
621
622 old_chain = make_cleanup_restore_current_thread ();
623
624 /* We're letting go of the parent. */
625 tp = any_live_thread_of_process (inf->vfork_parent->pid);
626 switch_to_thread (tp->ptid);
627
628 /* We're about to detach from the parent, which implicitly
629 removes breakpoints from its address space. There's a
630 catch here: we want to reuse the spaces for the child,
631 but, parent/child are still sharing the pspace at this
632 point, although the exec in reality makes the kernel give
633 the child a fresh set of new pages. The problem here is
634 that the breakpoints module, being unaware of this, would
635 likely choose the child process to write to the parent
636 address space. Swapping the child temporarily away from
637 the spaces has the desired effect. Yes, this is "sort
638 of" a hack. */
639
640 pspace = inf->pspace;
641 aspace = inf->aspace;
642 inf->aspace = NULL;
643 inf->pspace = NULL;
644
645 if (debug_infrun || info_verbose)
646 {
647 target_terminal_ours ();
648
649 if (exec)
650 fprintf_filtered (gdb_stdlog,
651 "Detaching vfork parent process "
652 "%d after child exec.\n",
653 inf->vfork_parent->pid);
654 else
655 fprintf_filtered (gdb_stdlog,
656 "Detaching vfork parent process "
657 "%d after child exit.\n",
658 inf->vfork_parent->pid);
659 }
660
661 target_detach (NULL, 0);
662
663 /* Put it back. */
664 inf->pspace = pspace;
665 inf->aspace = aspace;
666
667 do_cleanups (old_chain);
668 }
669 else if (exec)
670 {
671 /* We're staying attached to the parent, so, really give the
672 child a new address space. */
673 inf->pspace = add_program_space (maybe_new_address_space ());
674 inf->aspace = inf->pspace->aspace;
675 inf->removable = 1;
676 set_current_program_space (inf->pspace);
677
678 resume_parent = inf->vfork_parent->pid;
679
680 /* Break the bonds. */
681 inf->vfork_parent->vfork_child = NULL;
682 }
683 else
684 {
685 struct cleanup *old_chain;
686 struct program_space *pspace;
687
688 /* If this is a vfork child exiting, then the pspace and
689 aspaces were shared with the parent. Since we're
690 reporting the process exit, we'll be mourning all that is
691 found in the address space, and switching to null_ptid,
692 preparing to start a new inferior. But, since we don't
693 want to clobber the parent's address/program spaces, we
694 go ahead and create a new one for this exiting
695 inferior. */
696
697 /* Switch to null_ptid, so that clone_program_space doesn't want
698 to read the selected frame of a dead process. */
699 old_chain = save_inferior_ptid ();
700 inferior_ptid = null_ptid;
701
702 /* This inferior is dead, so avoid giving the breakpoints
703 module the option to write through to it (cloning a
704 program space resets breakpoints). */
705 inf->aspace = NULL;
706 inf->pspace = NULL;
707 pspace = add_program_space (maybe_new_address_space ());
708 set_current_program_space (pspace);
709 inf->removable = 1;
710 clone_program_space (pspace, inf->vfork_parent->pspace);
711 inf->pspace = pspace;
712 inf->aspace = pspace->aspace;
713
714 /* Put back inferior_ptid. We'll continue mourning this
715 inferior. */
716 do_cleanups (old_chain);
717
718 resume_parent = inf->vfork_parent->pid;
719 /* Break the bonds. */
720 inf->vfork_parent->vfork_child = NULL;
721 }
722
723 inf->vfork_parent = NULL;
724
725 gdb_assert (current_program_space == inf->pspace);
726
727 if (non_stop && resume_parent != -1)
728 {
729 /* If the user wanted the parent to be running, let it go
730 free now. */
731 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
732
733 if (debug_infrun)
734 fprintf_unfiltered (gdb_stdlog,
735 "infrun: resuming vfork parent process %d\n",
736 resume_parent);
737
738 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
739
740 do_cleanups (old_chain);
741 }
742 }
743 }
744
745 /* Enum strings for "set|show follow-exec-mode". */
746
747 static const char follow_exec_mode_new[] = "new";
748 static const char follow_exec_mode_same[] = "same";
749 static const char *follow_exec_mode_names[] =
750 {
751 follow_exec_mode_new,
752 follow_exec_mode_same,
753 NULL,
754 };
755
756 static const char *follow_exec_mode_string = follow_exec_mode_same;
757 static void
758 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
759 struct cmd_list_element *c, const char *value)
760 {
761 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
762 }
763
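/* Handle an exec event: flush the now-stale breakpoints and symbols of the
   pre-exec program, load the new executable EXECD_PATHNAME, and, if
   "follow-exec-mode" is "new", first switch to a fresh inferior and
   program space.  */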
764 /* EXECD_PATHNAME is assumed to be non-NULL. */
765
766 static void
767 follow_exec (ptid_t pid, char *execd_pathname)
768 {
769 struct thread_info *th = inferior_thread ();
770 struct inferior *inf = current_inferior ();
771
772 /* This is an exec event that we actually wish to pay attention to.
773 Refresh our symbol table to the newly exec'd program, remove any
774 momentary bp's, etc.
775
776 If there are breakpoints, they aren't really inserted now,
777 since the exec() transformed our inferior into a fresh set
778 of instructions.
779
780 We want to preserve symbolic breakpoints on the list, since
781 we have hopes that they can be reset after the new a.out's
782 symbol table is read.
783
784 However, any "raw" breakpoints must be removed from the list
785 (e.g., the solib bp's), since their address is probably invalid
786 now.
787
788 And, we DON'T want to call delete_breakpoints() here, since
789 that may write the bp's "shadow contents" (the instruction
790 value that was overwritten with a TRAP instruction). Since
791 we now have a new a.out, those shadow contents aren't valid. */
792
793 mark_breakpoints_out ();
794
795 update_breakpoints_after_exec ();
796
797 /* If there was one, it's gone now. We cannot truly step-to-next
798 statement through an exec(). */
799 th->control.step_resume_breakpoint = NULL;
800 th->control.exception_resume_breakpoint = NULL;
801 th->control.step_range_start = 0;
802 th->control.step_range_end = 0;
803
804 /* The target reports the exec event to the main thread, even if
805 some other thread does the exec, and even if the main thread was
806 already stopped --- if debugging in non-stop mode, it's possible
807 the user had the main thread held stopped in the previous image
808 --- release it now. This is the same behavior as step-over-exec
809 with scheduler-locking on in all-stop mode. */
810 th->stop_requested = 0;
811
812 /* What is this a.out's name? */
813 printf_unfiltered (_("%s is executing new program: %s\n"),
814 target_pid_to_str (inferior_ptid),
815 execd_pathname);
816
817 /* We've followed the inferior through an exec. Therefore, the
818 inferior has essentially been killed & reborn. */
819
820 gdb_flush (gdb_stdout);
821
822 breakpoint_init_inferior (inf_execd);
823
824 if (gdb_sysroot && *gdb_sysroot)
825 {
826 char *name = alloca (strlen (gdb_sysroot)
827 + strlen (execd_pathname)
828 + 1);
829
830 strcpy (name, gdb_sysroot);
831 strcat (name, execd_pathname);
832 execd_pathname = name;
833 }
834
835 /* Reset the shared library package. This ensures that we get a
836 shlib event when the child reaches "_start", at which point the
837 dld will have had a chance to initialize the child. */
838 /* Also, loading a symbol file below may trigger symbol lookups, and
839 we don't want those to be satisfied by the libraries of the
840 previous incarnation of this process. */
841 no_shared_libraries (NULL, 0);
842
843 if (follow_exec_mode_string == follow_exec_mode_new)
844 {
845 struct program_space *pspace;
846
847 /* The user wants to keep the old inferior and program spaces
848 around. Create a new fresh one, and switch to it. */
849
850 inf = add_inferior (current_inferior ()->pid);
851 pspace = add_program_space (maybe_new_address_space ());
852 inf->pspace = pspace;
853 inf->aspace = pspace->aspace;
854
855 exit_inferior_num_silent (current_inferior ()->num);
856
857 set_current_inferior (inf);
858 set_current_program_space (pspace);
859 }
860
861 gdb_assert (current_program_space == inf->pspace);
862
863 /* That a.out is now the one to use. */
864 exec_file_attach (execd_pathname, 0);
865
866 /* SYMFILE_DEFER_BP_RESET is used here, as the proper displacement for a PIE
867 (Position Independent Executable) main symbol file will only get applied by
868 solib_create_inferior_hook below. breakpoint_re_set would otherwise fail to
869 insert the breakpoints with the zero displacement. */
870
871 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
872 NULL, 0);
873
874 set_initial_language ();
875
876 #ifdef SOLIB_CREATE_INFERIOR_HOOK
877 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
878 #else
879 solib_create_inferior_hook (0);
880 #endif
881
882 jit_inferior_created_hook ();
883
884 breakpoint_re_set ();
885
886 /* Reinsert all breakpoints. (Those which were symbolic have
887 been reset to the proper address in the new a.out, thanks
888 to symbol_file_command...). */
889 insert_breakpoints ();
890
891 /* The next resume of this inferior should bring it to the shlib
892 startup breakpoints. (If the user had also set bp's on
893 "main" from the old (parent) process, then they'll auto-
894 matically get reset there in the new process.). */
895 }
896
897 /* Non-zero if we are just simulating a single-step. This is needed
898 because we cannot remove the breakpoints in the inferior process
899 until after the `wait' in `wait_for_inferior'. */
900 static int singlestep_breakpoints_inserted_p = 0;
901
902 /* The thread we inserted single-step breakpoints for. */
903 static ptid_t singlestep_ptid;
904
905 /* PC when we started this single-step. */
906 static CORE_ADDR singlestep_pc;
907
908 /* If another thread hit the singlestep breakpoint, we save the original
909 thread here so that we can resume single-stepping it later. */
910 static ptid_t saved_singlestep_ptid;
911 static int stepping_past_singlestep_breakpoint;
912
913 /* If not equal to null_ptid, this means that after stepping over a breakpoint
914 is finished, we need to switch to deferred_step_ptid, and step it.
915
916 The use case is when one thread has hit a breakpoint, and then the user
917 has switched to another thread and issued 'step'. We need to step over
918 the breakpoint in the thread which hit the breakpoint, but then continue
919 stepping the thread the user has selected. */
920 static ptid_t deferred_step_ptid;
921 \f
922 /* Displaced stepping. */
923
924 /* In non-stop debugging mode, we must take special care to manage
925 breakpoints properly; in particular, the traditional strategy for
926 stepping a thread past a breakpoint it has hit is unsuitable.
927 'Displaced stepping' is a tactic for stepping one thread past a
928 breakpoint it has hit while ensuring that other threads running
929 concurrently will hit the breakpoint as they should.
930
931 The traditional way to step a thread T off a breakpoint in a
932 multi-threaded program in all-stop mode is as follows:
933
934 a0) Initially, all threads are stopped, and breakpoints are not
935 inserted.
936 a1) We single-step T, leaving breakpoints uninserted.
937 a2) We insert breakpoints, and resume all threads.
938
939 In non-stop debugging, however, this strategy is unsuitable: we
940 don't want to have to stop all threads in the system in order to
941 continue or step T past a breakpoint. Instead, we use displaced
942 stepping:
943
944 n0) Initially, T is stopped, other threads are running, and
945 breakpoints are inserted.
946 n1) We copy the instruction "under" the breakpoint to a separate
947 location, outside the main code stream, making any adjustments
948 to the instruction, register, and memory state as directed by
949 T's architecture.
950 n2) We single-step T over the instruction at its new location.
951 n3) We adjust the resulting register and memory state as directed
952 by T's architecture. This includes resetting T's PC to point
953 back into the main instruction stream.
954 n4) We resume T.
955
956 This approach depends on the following gdbarch methods:
957
958 - gdbarch_max_insn_length and gdbarch_displaced_step_location
959 indicate where to copy the instruction, and how much space must
960 be reserved there. We use these in step n1.
961
962 - gdbarch_displaced_step_copy_insn copies an instruction to a new
963 address, and makes any necessary adjustments to the instruction,
964 register contents, and memory. We use this in step n1.
965
966 - gdbarch_displaced_step_fixup adjusts registers and memory after
967 we have successfully single-stepped the instruction, to yield the
968 same effect the instruction would have had if we had executed it
969 at its original address. We use this in step n3.
970
971 - gdbarch_displaced_step_free_closure provides cleanup.
972
973 The gdbarch_displaced_step_copy_insn and
974 gdbarch_displaced_step_fixup functions must be written so that
975 copying an instruction with gdbarch_displaced_step_copy_insn,
976 single-stepping across the copied instruction, and then applying
977 gdbarch_displaced_step_fixup should have the same effects on the
978 thread's memory and registers as stepping the instruction in place
979 would have. Exactly which responsibilities fall to the copy and
980 which fall to the fixup is up to the author of those functions.
981
982 See the comments in gdbarch.sh for details.
983
984 Note that displaced stepping and software single-step cannot
985 currently be used in combination, although with some care I think
986 they could be made to. Software single-step works by placing
987 breakpoints on all possible subsequent instructions; if the
988 displaced instruction is a PC-relative jump, those breakpoints
989 could fall in very strange places --- on pages that aren't
990 executable, or at addresses that are not proper instruction
991 boundaries. (We do generally let other threads run while we wait
992 to hit the software single-step breakpoint, and they might
993 encounter such a corrupted instruction.) One way to work around
994 this would be to have gdbarch_displaced_step_copy_insn fully
995 simulate the effect of PC-relative instructions (and return NULL)
996 on architectures that use software single-stepping.
997
998 In non-stop mode, we can have independent and simultaneous step
999 requests, so more than one thread may need to simultaneously step
1000 over a breakpoint. The current implementation assumes there is
1001 only one scratch space per process. In this case, we have to
1002 serialize access to the scratch space. If thread A wants to step
1003 over a breakpoint, but we are currently waiting for some other
1004 thread to complete a displaced step, we leave thread A stopped and
1005 place it in the displaced_step_request_queue. Whenever a displaced
1006 step finishes, we pick the next thread in the queue and start a new
1007 displaced step operation on it. See displaced_step_prepare and
1008 displaced_step_fixup for details. */
1009
1010 struct displaced_step_request
1011 {
1012 ptid_t ptid;
1013 struct displaced_step_request *next;
1014 };
1015
1016 /* Per-inferior displaced stepping state. */
1017 struct displaced_step_inferior_state
1018 {
1019 /* Pointer to next in linked list. */
1020 struct displaced_step_inferior_state *next;
1021
1022 /* The process this displaced step state refers to. */
1023 int pid;
1024
1025 /* A queue of pending displaced stepping requests. One entry per
1026 thread that needs to do a displaced step. */
1027 struct displaced_step_request *step_request_queue;
1028
1029 /* If this is not null_ptid, this is the thread carrying out a
1030 displaced single-step in process PID. This thread's state will
1031 require fixing up once it has completed its step. */
1032 ptid_t step_ptid;
1033
1034 /* The architecture the thread had when we stepped it. */
1035 struct gdbarch *step_gdbarch;
1036
1037 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1038 for post-step cleanup. */
1039 struct displaced_step_closure *step_closure;
1040
1041 /* The address of the original instruction, and the copy we
1042 made. */
1043 CORE_ADDR step_original, step_copy;
1044
1045 /* Saved contents of copy area. */
1046 gdb_byte *step_saved_copy;
1047 };
1048
1049 /* The list of states of processes involved in displaced stepping
1050 presently. */
1051 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1052
1053 /* Get the displaced stepping state of process PID. */
1054
1055 static struct displaced_step_inferior_state *
1056 get_displaced_stepping_state (int pid)
1057 {
1058 struct displaced_step_inferior_state *state;
1059
1060 for (state = displaced_step_inferior_states;
1061 state != NULL;
1062 state = state->next)
1063 if (state->pid == pid)
1064 return state;
1065
1066 return NULL;
1067 }
1068
1069 /* Add a new displaced stepping state for process PID to the displaced
1070 stepping state list, or return a pointer to an already existing
1071 entry, if it already exists. Never returns NULL. */
1072
1073 static struct displaced_step_inferior_state *
1074 add_displaced_stepping_state (int pid)
1075 {
1076 struct displaced_step_inferior_state *state;
1077
1078 for (state = displaced_step_inferior_states;
1079 state != NULL;
1080 state = state->next)
1081 if (state->pid == pid)
1082 return state;
1083
1084 state = xcalloc (1, sizeof (*state));
1085 state->pid = pid;
1086 state->next = displaced_step_inferior_states;
1087 displaced_step_inferior_states = state;
1088
1089 return state;
1090 }
1091
1092 /* If the inferior is in the middle of a displaced step, and ADDR equals the
1093 starting address of the copy area, return the corresponding
1094 displaced_step_closure. Otherwise, return NULL. */
1095
1096 struct displaced_step_closure*
1097 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1098 {
1099 struct displaced_step_inferior_state *displaced
1100 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1101
1102 /* Used when checking the mode of the displaced instruction in the copy area. */
1103 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1104 && (displaced->step_copy == addr))
1105 return displaced->step_closure;
1106
1107 return NULL;
1108 }
1109
1110 /* Remove the displaced stepping state of process PID. */
1111
1112 static void
1113 remove_displaced_stepping_state (int pid)
1114 {
1115 struct displaced_step_inferior_state *it, **prev_next_p;
1116
1117 gdb_assert (pid != 0);
1118
1119 it = displaced_step_inferior_states;
1120 prev_next_p = &displaced_step_inferior_states;
1121 while (it)
1122 {
1123 if (it->pid == pid)
1124 {
1125 *prev_next_p = it->next;
1126 xfree (it);
1127 return;
1128 }
1129
1130 prev_next_p = &it->next;
1131 it = *prev_next_p;
1132 }
1133 }
1134
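/* Forget the displaced stepping state of inferior INF; called when INF
   exits.  */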
1135 static void
1136 infrun_inferior_exit (struct inferior *inf)
1137 {
1138 remove_displaced_stepping_state (inf->pid);
1139 }
1140
1141 /* Enum strings for "set|show displaced-stepping". */
1142
1143 static const char can_use_displaced_stepping_auto[] = "auto";
1144 static const char can_use_displaced_stepping_on[] = "on";
1145 static const char can_use_displaced_stepping_off[] = "off";
1146 static const char *can_use_displaced_stepping_enum[] =
1147 {
1148 can_use_displaced_stepping_auto,
1149 can_use_displaced_stepping_on,
1150 can_use_displaced_stepping_off,
1151 NULL,
1152 };
1153
1154 /* If ON, and the architecture supports it, GDB will use displaced
1155 stepping to step over breakpoints. If OFF, or if the architecture
1156 doesn't support it, GDB will instead use the traditional
1157 hold-and-step approach. If AUTO (which is the default), GDB will
1158 decide which technique to use to step over breakpoints depending on
1159 which of all-stop or non-stop mode is active --- displaced stepping
1160 in non-stop mode; hold-and-step in all-stop mode. */
1161
1162 static const char *can_use_displaced_stepping =
1163 can_use_displaced_stepping_auto;
1164
1165 static void
1166 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1167 struct cmd_list_element *c,
1168 const char *value)
1169 {
1170 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1171 fprintf_filtered (file,
1172 _("Debugger's willingness to use displaced stepping "
1173 "to step over breakpoints is %s (currently %s).\n"),
1174 value, non_stop ? "on" : "off");
1175 else
1176 fprintf_filtered (file,
1177 _("Debugger's willingness to use displaced stepping "
1178 "to step over breakpoints is %s.\n"), value);
1179 }
1180
1181 /* Return non-zero if displaced stepping can/should be used to step
1182 over breakpoints. */
1183
1184 static int
1185 use_displaced_stepping (struct gdbarch *gdbarch)
1186 {
1187 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1188 && non_stop)
1189 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1190 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1191 && !RECORD_IS_USED);
1192 }
1193
1194 /* Clean out any stray displaced stepping state. */
1195 static void
1196 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1197 {
1198 /* Indicate that there is no cleanup pending. */
1199 displaced->step_ptid = null_ptid;
1200
1201 if (displaced->step_closure)
1202 {
1203 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1204 displaced->step_closure);
1205 displaced->step_closure = NULL;
1206 }
1207 }
1208
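/* Cleanup wrapper around displaced_step_clear, suitable for make_cleanup.  */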
1209 static void
1210 displaced_step_clear_cleanup (void *arg)
1211 {
1212 struct displaced_step_inferior_state *state = arg;
1213
1214 displaced_step_clear (state);
1215 }
1216
1217 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1218 void
1219 displaced_step_dump_bytes (struct ui_file *file,
1220 const gdb_byte *buf,
1221 size_t len)
1222 {
1223 int i;
1224
1225 for (i = 0; i < len; i++)
1226 fprintf_unfiltered (file, "%02x ", buf[i]);
1227 fputs_unfiltered ("\n", file);
1228 }
1229
1230 /* Prepare to single-step, using displaced stepping.
1231
1232 Note that we cannot use displaced stepping when we have a signal to
1233 deliver. If we have a signal to deliver and an instruction to step
1234 over, then after the step, there will be no indication from the
1235 target whether the thread entered a signal handler or ignored the
1236 signal and stepped over the instruction successfully --- both cases
1237 result in a simple SIGTRAP. In the first case we mustn't do a
1238 fixup, and in the second case we must --- but we can't tell which.
1239 Comments in the code for 'random signals' in handle_inferior_event
1240 explain how we handle this case instead.
1241
1242 Returns 1 if preparing was successful -- this thread is going to be
1243 stepped now; or 0 if displaced stepping this thread got queued. */
1244 static int
1245 displaced_step_prepare (ptid_t ptid)
1246 {
1247 struct cleanup *old_cleanups, *ignore_cleanups;
1248 struct regcache *regcache = get_thread_regcache (ptid);
1249 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1250 CORE_ADDR original, copy;
1251 ULONGEST len;
1252 struct displaced_step_closure *closure;
1253 struct displaced_step_inferior_state *displaced;
1254
1255 /* We should never reach this function if the architecture does not
1256 support displaced stepping. */
1257 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1258
1259 /* We have to displaced step one thread at a time, as we only have
1260 access to a single scratch space per inferior. */
1261
1262 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1263
1264 if (!ptid_equal (displaced->step_ptid, null_ptid))
1265 {
1266 /* Already waiting for a displaced step to finish. Defer this
1267 request and place in queue. */
1268 struct displaced_step_request *req, *new_req;
1269
1270 if (debug_displaced)
1271 fprintf_unfiltered (gdb_stdlog,
1272 "displaced: defering step of %s\n",
1273 target_pid_to_str (ptid));
1274
1275 new_req = xmalloc (sizeof (*new_req));
1276 new_req->ptid = ptid;
1277 new_req->next = NULL;
1278
1279 if (displaced->step_request_queue)
1280 {
1281 for (req = displaced->step_request_queue;
1282 req && req->next;
1283 req = req->next)
1284 ;
1285 req->next = new_req;
1286 }
1287 else
1288 displaced->step_request_queue = new_req;
1289
1290 return 0;
1291 }
1292 else
1293 {
1294 if (debug_displaced)
1295 fprintf_unfiltered (gdb_stdlog,
1296 "displaced: stepping %s now\n",
1297 target_pid_to_str (ptid));
1298 }
1299
1300 displaced_step_clear (displaced);
1301
1302 old_cleanups = save_inferior_ptid ();
1303 inferior_ptid = ptid;
1304
1305 original = regcache_read_pc (regcache);
1306
1307 copy = gdbarch_displaced_step_location (gdbarch);
1308 len = gdbarch_max_insn_length (gdbarch);
1309
1310 /* Save the original contents of the copy area. */
1311 displaced->step_saved_copy = xmalloc (len);
1312 ignore_cleanups = make_cleanup (free_current_contents,
1313 &displaced->step_saved_copy);
1314 read_memory (copy, displaced->step_saved_copy, len);
1315 if (debug_displaced)
1316 {
1317 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1318 paddress (gdbarch, copy));
1319 displaced_step_dump_bytes (gdb_stdlog,
1320 displaced->step_saved_copy,
1321 len);
1322 };
1323
1324 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1325 original, copy, regcache);
1326
1327 /* We don't support the fully-simulated case at present. */
1328 gdb_assert (closure);
1329
1330 /* Save the information we need to fix things up if the step
1331 succeeds. */
1332 displaced->step_ptid = ptid;
1333 displaced->step_gdbarch = gdbarch;
1334 displaced->step_closure = closure;
1335 displaced->step_original = original;
1336 displaced->step_copy = copy;
1337
1338 make_cleanup (displaced_step_clear_cleanup, displaced);
1339
1340 /* Resume execution at the copy. */
1341 regcache_write_pc (regcache, copy);
1342
1343 discard_cleanups (ignore_cleanups);
1344
1345 do_cleanups (old_cleanups);
1346
1347 if (debug_displaced)
1348 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1349 paddress (gdbarch, copy));
1350
1351 return 1;
1352 }
1353
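/* Write LEN bytes from MYADDR into the memory of thread PTID at address
   MEMADDR, temporarily making PTID the current inferior_ptid for the
   write.  */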
1354 static void
1355 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1356 const gdb_byte *myaddr, int len)
1357 {
1358 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1359
1360 inferior_ptid = ptid;
1361 write_memory (memaddr, myaddr, len);
1362 do_cleanups (ptid_cleanup);
1363 }
1364
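/* Called when EVENT_PTID stops with SIGNAL after a possible displaced step.
   If that thread was the one being displaced-stepped, restore the original
   contents of the scratch area, apply the architecture's fixup if the step
   completed (i.e. SIGNAL is TARGET_SIGNAL_TRAP), and start the next queued
   displaced-step request, if any.  */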
1365 static void
1366 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1367 {
1368 struct cleanup *old_cleanups;
1369 struct displaced_step_inferior_state *displaced
1370 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1371
1372 /* Was any thread of this process doing a displaced step? */
1373 if (displaced == NULL)
1374 return;
1375
1376 /* Was this event for the pid we displaced? */
1377 if (ptid_equal (displaced->step_ptid, null_ptid)
1378 || ! ptid_equal (displaced->step_ptid, event_ptid))
1379 return;
1380
1381 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1382
1383 /* Restore the contents of the copy area. */
1384 {
1385 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1386
1387 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1388 displaced->step_saved_copy, len);
1389 if (debug_displaced)
1390 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1391 paddress (displaced->step_gdbarch,
1392 displaced->step_copy));
1393 }
1394
1395 /* Did the instruction complete successfully? */
1396 if (signal == TARGET_SIGNAL_TRAP)
1397 {
1398 /* Fix up the resulting state. */
1399 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1400 displaced->step_closure,
1401 displaced->step_original,
1402 displaced->step_copy,
1403 get_thread_regcache (displaced->step_ptid));
1404 }
1405 else
1406 {
1407 /* Since the instruction didn't complete, all we can do is
1408 relocate the PC. */
1409 struct regcache *regcache = get_thread_regcache (event_ptid);
1410 CORE_ADDR pc = regcache_read_pc (regcache);
1411
1412 pc = displaced->step_original + (pc - displaced->step_copy);
1413 regcache_write_pc (regcache, pc);
1414 }
1415
1416 do_cleanups (old_cleanups);
1417
1418 displaced->step_ptid = null_ptid;
1419
1420 /* Are there any pending displaced stepping requests? If so, run
1421 one now. Leave the state object around, since we're likely to
1422 need it again soon. */
1423 while (displaced->step_request_queue)
1424 {
1425 struct displaced_step_request *head;
1426 ptid_t ptid;
1427 struct regcache *regcache;
1428 struct gdbarch *gdbarch;
1429 CORE_ADDR actual_pc;
1430 struct address_space *aspace;
1431
1432 head = displaced->step_request_queue;
1433 ptid = head->ptid;
1434 displaced->step_request_queue = head->next;
1435 xfree (head);
1436
1437 context_switch (ptid);
1438
1439 regcache = get_thread_regcache (ptid);
1440 actual_pc = regcache_read_pc (regcache);
1441 aspace = get_regcache_aspace (regcache);
1442
1443 if (breakpoint_here_p (aspace, actual_pc))
1444 {
1445 if (debug_displaced)
1446 fprintf_unfiltered (gdb_stdlog,
1447 "displaced: stepping queued %s now\n",
1448 target_pid_to_str (ptid));
1449
1450 displaced_step_prepare (ptid);
1451
1452 gdbarch = get_regcache_arch (regcache);
1453
1454 if (debug_displaced)
1455 {
1456 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1457 gdb_byte buf[4];
1458
1459 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1460 paddress (gdbarch, actual_pc));
1461 read_memory (actual_pc, buf, sizeof (buf));
1462 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1463 }
1464
1465 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1466 displaced->step_closure))
1467 target_resume (ptid, 1, TARGET_SIGNAL_0);
1468 else
1469 target_resume (ptid, 0, TARGET_SIGNAL_0);
1470
1471 /* Done, we're stepping a thread. */
1472 break;
1473 }
1474 else
1475 {
1476 int step;
1477 struct thread_info *tp = inferior_thread ();
1478
1479 /* The breakpoint we were sitting under has since been
1480 removed. */
1481 tp->control.trap_expected = 0;
1482
1483 /* Go back to what we were trying to do. */
1484 step = currently_stepping (tp);
1485
1486 if (debug_displaced)
1487 fprintf_unfiltered (gdb_stdlog,
1488 "breakpoint is gone %s: step(%d)\n",
1489 target_pid_to_str (tp->ptid), step);
1490
1491 target_resume (ptid, step, TARGET_SIGNAL_0);
1492 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1493
1494 /* This request was discarded. See if there's any other
1495 thread waiting for its turn. */
1496 }
1497 }
1498 }
1499
1500 /* Update global variables holding ptids to hold NEW_PTID if they were
1501 holding OLD_PTID. */
1502 static void
1503 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1504 {
1505 struct displaced_step_request *it;
1506 struct displaced_step_inferior_state *displaced;
1507
1508 if (ptid_equal (inferior_ptid, old_ptid))
1509 inferior_ptid = new_ptid;
1510
1511 if (ptid_equal (singlestep_ptid, old_ptid))
1512 singlestep_ptid = new_ptid;
1513
1514 if (ptid_equal (deferred_step_ptid, old_ptid))
1515 deferred_step_ptid = new_ptid;
1516
1517 for (displaced = displaced_step_inferior_states;
1518 displaced;
1519 displaced = displaced->next)
1520 {
1521 if (ptid_equal (displaced->step_ptid, old_ptid))
1522 displaced->step_ptid = new_ptid;
1523
1524 for (it = displaced->step_request_queue; it; it = it->next)
1525 if (ptid_equal (it->ptid, old_ptid))
1526 it->ptid = new_ptid;
1527 }
1528 }
1529
1530 \f
1531 /* Resuming. */
1532
1533 /* Things to clean up if we QUIT out of resume (). */
1534 static void
1535 resume_cleanups (void *ignore)
1536 {
1537 normal_stop ();
1538 }
1539
1540 static const char schedlock_off[] = "off";
1541 static const char schedlock_on[] = "on";
1542 static const char schedlock_step[] = "step";
1543 static const char *scheduler_enums[] = {
1544 schedlock_off,
1545 schedlock_on,
1546 schedlock_step,
1547 NULL
1548 };
1549 static const char *scheduler_mode = schedlock_off;
1550 static void
1551 show_scheduler_mode (struct ui_file *file, int from_tty,
1552 struct cmd_list_element *c, const char *value)
1553 {
1554 fprintf_filtered (file,
1555 _("Mode for locking scheduler "
1556 "during execution is \"%s\".\n"),
1557 value);
1558 }
1559
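/* The "set scheduler-locking" hook: if the target cannot lock the
   scheduler, revert to "off" and report an error.  */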
1560 static void
1561 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1562 {
1563 if (!target_can_lock_scheduler)
1564 {
1565 scheduler_mode = schedlock_off;
1566 error (_("Target '%s' cannot support this command."), target_shortname);
1567 }
1568 }
1569
1570 /* True if execution commands resume all threads of all processes by
1571 default; otherwise, resume only threads of the current inferior
1572 process. */
1573 int sched_multi = 0;
1574
1575 /* Try to setup for software single stepping over the specified location.
1576 Return 1 if target_resume() should use hardware single step.
1577
1578 GDBARCH the current gdbarch.
1579 PC the location to step over. */
1580
1581 static int
1582 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1583 {
1584 int hw_step = 1;
1585
1586 if (execution_direction == EXEC_FORWARD
1587 && gdbarch_software_single_step_p (gdbarch)
1588 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1589 {
1590 hw_step = 0;
1591 /* Do not pull these breakpoints until after a `wait' in
1592 `wait_for_inferior'. */
1593 singlestep_breakpoints_inserted_p = 1;
1594 singlestep_ptid = inferior_ptid;
1595 singlestep_pc = pc;
1596 }
1597 return hw_step;
1598 }
1599
1600 /* Return a ptid representing the set of threads that we will proceed with,
1601 from the perspective of the user/frontend. We may actually resume
1602 fewer threads at first, e.g., if a thread is stopped at a
1603 breakpoint that needs stepping-off, but that should not be visible
1604 to the user/frontend, and neither should the frontend/user be
1605 allowed to proceed any of the threads that happen to be stopped for
1606 internal run control handling, if a previous command wanted them
1607 resumed. */
1608
1609 ptid_t
1610 user_visible_resume_ptid (int step)
1611 {
1612 /* By default, resume all threads of all processes. */
1613 ptid_t resume_ptid = RESUME_ALL;
1614
1615 /* Maybe resume only all threads of the current process. */
1616 if (!sched_multi && target_supports_multi_process ())
1617 {
1618 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1619 }
1620
1621 /* Maybe resume a single thread after all. */
1622 if (non_stop)
1623 {
1624 /* With non-stop mode on, threads are always handled
1625 individually. */
1626 resume_ptid = inferior_ptid;
1627 }
1628 else if ((scheduler_mode == schedlock_on)
1629 || (scheduler_mode == schedlock_step
1630 && (step || singlestep_breakpoints_inserted_p)))
1631 {
1632 /* User-settable 'scheduler' mode requires solo thread resume. */
1633 resume_ptid = inferior_ptid;
1634 }
1635
1636 return resume_ptid;
1637 }
1638
1639 /* Resume the inferior, but allow a QUIT. This is useful if the user
1640 wants to interrupt some lengthy single-stepping operation
1641 (for child processes, the SIGINT goes to the inferior, and so
1642 we get a SIGINT random_signal, but for remote debugging and perhaps
1643 other targets, that's not true).
1644
1645 STEP nonzero if we should step (zero to continue instead).
1646 SIG is the signal to give the inferior (zero for none). */
1647 void
1648 resume (int step, enum target_signal sig)
1649 {
1650 int should_resume = 1;
1651 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1652 struct regcache *regcache = get_current_regcache ();
1653 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1654 struct thread_info *tp = inferior_thread ();
1655 CORE_ADDR pc = regcache_read_pc (regcache);
1656 struct address_space *aspace = get_regcache_aspace (regcache);
1657
1658 QUIT;
1659
1660 if (current_inferior ()->waiting_for_vfork_done)
1661 {
1662 /* Don't try to single-step a vfork parent that is waiting for
1663 the child to get out of the shared memory region (by exec'ing
1664 or exiting). This is particularly important on software
1665 single-step archs, as the child process would trip on the
1666 software single step breakpoint inserted for the parent
1667 process. Since the parent will not actually execute any
1668 instruction until the child is out of the shared region (such
1669 are vfork's semantics), it is safe to simply continue it.
1670 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1671 the parent, and tell it to `keep_going', which automatically
1672 re-sets it to stepping. */
1673 if (debug_infrun)
1674 fprintf_unfiltered (gdb_stdlog,
1675 "infrun: resume : clear step\n");
1676 step = 0;
1677 }
1678
1679 if (debug_infrun)
1680 fprintf_unfiltered (gdb_stdlog,
1681 "infrun: resume (step=%d, signal=%d), "
1682 "trap_expected=%d, current thread [%s] at %s\n",
1683 step, sig, tp->control.trap_expected,
1684 target_pid_to_str (inferior_ptid),
1685 paddress (gdbarch, pc));
1686
1687 /* Normally, by the time we reach `resume', the breakpoints are either
1688 removed or inserted, as appropriate. The exception is if we're sitting
1689 at a permanent breakpoint; we need to step over it, but permanent
1690 breakpoints can't be removed. So we have to test for it here. */
1691 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1692 {
1693 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1694 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1695 else
1696 error (_("\
1697 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1698 how to step past a permanent breakpoint on this architecture. Try using\n\
1699 a command like `return' or `jump' to continue execution."));
1700 }
1701
1702 /* If enabled, step over breakpoints by executing a copy of the
1703 instruction at a different address.
1704
1705 We can't use displaced stepping when we have a signal to deliver;
1706 the comments for displaced_step_prepare explain why. The
1707 comments in handle_inferior_event for dealing with 'random
1708 signals' explain what we do instead.
1709
1710 We can't use displaced stepping when we are waiting for a vfork_done
1711 event; displaced stepping would break the vfork child in the same way
1712 a software single-step breakpoint would. */
1713 if (use_displaced_stepping (gdbarch)
1714 && (tp->control.trap_expected
1715 || (step && gdbarch_software_single_step_p (gdbarch)))
1716 && sig == TARGET_SIGNAL_0
1717 && !current_inferior ()->waiting_for_vfork_done)
1718 {
1719 struct displaced_step_inferior_state *displaced;
1720
1721 if (!displaced_step_prepare (inferior_ptid))
1722 {
1723 /* Got placed in displaced stepping queue. Will be resumed
1724 later when all the currently queued displaced stepping
1725 requests finish. The thread is not executing at this point,
1726 and the call to set_executing will be made later. But we
1727 need to call set_running here, since from the frontend's point of view,
1728 the thread is running. */
1729 set_running (inferior_ptid, 1);
1730 discard_cleanups (old_cleanups);
1731 return;
1732 }
1733
1734 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1735 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1736 displaced->step_closure);
1737 }
1738
1739 /* Do we need to do it the hard way, w/temp breakpoints? */
1740 else if (step)
1741 step = maybe_software_singlestep (gdbarch, pc);
1742
1743 /* Currently, our software single-step implementation leads to different
1744 results than hardware single-stepping in one situation: when stepping
1745 into delivering a signal which has an associated signal handler,
1746 hardware single-step will stop at the first instruction of the handler,
1747 while software single-step will simply skip execution of the handler.
1748
1749 For now, this difference in behavior is accepted since there is no
1750 easy way to actually implement single-stepping into a signal handler
1751 without kernel support.
1752
1753 However, there is one scenario where this difference leads to follow-on
1754 problems: if we're stepping off a breakpoint by removing all breakpoints
1755 and then single-stepping. In this case, the software single-step
1756 behavior means that even if there is a *breakpoint* in the signal
1757 handler, GDB still would not stop.
1758
1759 Fortunately, we can at least fix this particular issue. We detect
1760 here the case where we are about to deliver a signal while software
1761 single-stepping with breakpoints removed. In this situation, we
1762 revert the decisions to remove all breakpoints and insert single-
1763 step breakpoints, and instead we install a step-resume breakpoint
1764 at the current address, deliver the signal without stepping, and
1765 once we arrive back at the step-resume breakpoint, actually step
1766 over the breakpoint we originally wanted to step over. */
1767 if (singlestep_breakpoints_inserted_p
1768 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1769 {
1770 /* If we have nested signals or a pending signal is delivered
1771 immediately after a handler returns, we might already have
1772 a step-resume breakpoint set on the earlier handler. We cannot
1773 set another step-resume breakpoint; just continue on until the
1774 original breakpoint is hit. */
1775 if (tp->control.step_resume_breakpoint == NULL)
1776 {
1777 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1778 tp->step_after_step_resume_breakpoint = 1;
1779 }
1780
1781 remove_single_step_breakpoints ();
1782 singlestep_breakpoints_inserted_p = 0;
1783
1784 insert_breakpoints ();
1785 tp->control.trap_expected = 0;
1786 }
1787
1788 if (should_resume)
1789 {
1790 ptid_t resume_ptid;
1791
1792 /* If STEP is set, it's a request to use hardware stepping
1793 facilities. But in that case, we should never
1794 also have singlestep breakpoints inserted. */
1795 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1796
1797 /* Decide the set of threads to ask the target to resume. Start
1798 by assuming everything will be resumed, then narrow the set
1799 by applying increasingly restrictive conditions. */
1800 resume_ptid = user_visible_resume_ptid (step);
1801
1802 /* Maybe resume a single thread after all. */
1803 if (singlestep_breakpoints_inserted_p
1804 && stepping_past_singlestep_breakpoint)
1805 {
1806 /* The situation here is as follows. In thread T1 we wanted to
1807 single-step. Lacking hardware single-stepping, we've set a
1808 breakpoint at the PC of the next instruction -- call it
1809 P. After resuming, we've hit that breakpoint in thread T2.
1810 Now we've removed the original breakpoint, inserted a breakpoint
1811 at P+1, and try to step to advance T2 past the breakpoint.
1812 We need to step only T2, as if T1 is allowed to run freely,
1813 it can run past P, and if other threads are allowed to run,
1814 they can hit the breakpoint at P+1, and nested hits of single-step
1815 breakpoints are not something we'd want -- that's complicated
1816 to support, and has no value. */
1817 resume_ptid = inferior_ptid;
1818 }
1819 else if ((step || singlestep_breakpoints_inserted_p)
1820 && tp->control.trap_expected)
1821 {
1822 /* We're allowing a thread to run past a breakpoint it has
1823 hit, by single-stepping the thread with the breakpoint
1824 removed. In that case, we need to single-step only this
1825 thread, and keep others stopped, as they can miss this
1826 breakpoint if allowed to run.
1827
1828 The current code actually removes all breakpoints when
1829 doing this, not just the one being stepped over, so if we
1830 let other threads run, we can actually miss any
1831 breakpoint, not just the one at PC. */
1832 resume_ptid = inferior_ptid;
1833 }
1834
1835 if (gdbarch_cannot_step_breakpoint (gdbarch))
1836 {
1837 /* Most targets can step a breakpoint instruction, thus
1838 executing it normally. But if this one cannot, just
1839 continue and we will hit it anyway. */
1840 if (step && breakpoint_inserted_here_p (aspace, pc))
1841 step = 0;
1842 }
1843
1844 if (debug_displaced
1845 && use_displaced_stepping (gdbarch)
1846 && tp->control.trap_expected)
1847 {
1848 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1849 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1850 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1851 gdb_byte buf[4];
1852
1853 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1854 paddress (resume_gdbarch, actual_pc));
1855 read_memory (actual_pc, buf, sizeof (buf));
1856 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1857 }
1858
1859 /* Install inferior's terminal modes. */
1860 target_terminal_inferior ();
1861
1862 /* Avoid confusing the next resume, if the next stop/resume
1863 happens to apply to another thread. */
1864 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1865
1866 /* Advise target which signals may be handled silently. If we have
1867 removed breakpoints because we are stepping over one (which can
1868 happen only if we are not using displaced stepping), we need to
1869 receive all signals to avoid accidentally skipping a breakpoint
1870 during execution of a signal handler. */
1871 if ((step || singlestep_breakpoints_inserted_p)
1872 && tp->control.trap_expected
1873 && !use_displaced_stepping (gdbarch))
1874 target_pass_signals (0, NULL);
1875 else
1876 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1877
1878 target_resume (resume_ptid, step, sig);
1879 }
1880
1881 discard_cleanups (old_cleanups);
1882 }
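
/* Cross-reference (annotation, not part of the original source): in
   this file `resume' is reached from `proceed' below, which computes
   the STEP and SIG arguments after setting up trap_expected and the
   thread's stop_signal, and also directly from `handle_inferior_event'
   for events that merely need the target re-resumed, e.g.
   resume (0, TARGET_SIGNAL_0) on TARGET_WAITKIND_SPURIOUS.  */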
1883 \f
1884 /* Proceeding. */
1885
1886 /* Clear out all variables saying what to do when inferior is continued.
1887 First do this, then set the ones you want, then call `proceed'. */
1888
1889 static void
1890 clear_proceed_status_thread (struct thread_info *tp)
1891 {
1892 if (debug_infrun)
1893 fprintf_unfiltered (gdb_stdlog,
1894 "infrun: clear_proceed_status_thread (%s)\n",
1895 target_pid_to_str (tp->ptid));
1896
1897 tp->control.trap_expected = 0;
1898 tp->control.step_range_start = 0;
1899 tp->control.step_range_end = 0;
1900 tp->control.step_frame_id = null_frame_id;
1901 tp->control.step_stack_frame_id = null_frame_id;
1902 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1903 tp->stop_requested = 0;
1904
1905 tp->control.stop_step = 0;
1906
1907 tp->control.proceed_to_finish = 0;
1908
1909 /* Discard any remaining commands or status from previous stop. */
1910 bpstat_clear (&tp->control.stop_bpstat);
1911 }
1912
1913 static int
1914 clear_proceed_status_callback (struct thread_info *tp, void *data)
1915 {
1916 if (is_exited (tp->ptid))
1917 return 0;
1918
1919 clear_proceed_status_thread (tp);
1920 return 0;
1921 }
1922
1923 void
1924 clear_proceed_status (void)
1925 {
1926 if (!non_stop)
1927 {
1928 /* In all-stop mode, delete the per-thread status of all
1929 threads, even if inferior_ptid is null_ptid; there may be
1930 threads on the list. E.g., we may be launching a new
1931 process, while selecting the executable. */
1932 iterate_over_threads (clear_proceed_status_callback, NULL);
1933 }
1934
1935 if (!ptid_equal (inferior_ptid, null_ptid))
1936 {
1937 struct inferior *inferior;
1938
1939 if (non_stop)
1940 {
1941 /* If in non-stop mode, only delete the per-thread status of
1942 the current thread. */
1943 clear_proceed_status_thread (inferior_thread ());
1944 }
1945
1946 inferior = current_inferior ();
1947 inferior->control.stop_soon = NO_STOP_QUIETLY;
1948 }
1949
1950 stop_after_trap = 0;
1951
1952 observer_notify_about_to_proceed ();
1953
1954 if (stop_registers)
1955 {
1956 regcache_xfree (stop_registers);
1957 stop_registers = NULL;
1958 }
1959 }
1960
1961 /* Check the current thread against the thread that reported the most recent
1962 event. If a step-over is required, return TRUE and set the current thread
1963 to the old thread. Otherwise return FALSE.
1964
1965 This should be suitable for any targets that support threads. */
1966
1967 static int
1968 prepare_to_proceed (int step)
1969 {
1970 ptid_t wait_ptid;
1971 struct target_waitstatus wait_status;
1972 int schedlock_enabled;
1973
1974 /* With non-stop mode on, threads are always handled individually. */
1975 gdb_assert (! non_stop);
1976
1977 /* Get the last target status returned by target_wait(). */
1978 get_last_target_status (&wait_ptid, &wait_status);
1979
1980 /* Make sure we were stopped at a breakpoint. */
1981 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1982 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1983 && wait_status.value.sig != TARGET_SIGNAL_ILL
1984 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1985 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1986 {
1987 return 0;
1988 }
1989
1990 schedlock_enabled = (scheduler_mode == schedlock_on
1991 || (scheduler_mode == schedlock_step
1992 && step));
1993
1994 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1995 if (schedlock_enabled)
1996 return 0;
1997
1998 /* Don't switch over if we're about to resume some process other
1999 than WAIT_PTID's, and schedule-multiple is off. */
2000 if (!sched_multi
2001 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2002 return 0;
2003
2004 /* We have switched away from WAIT_PTID. */
2005 if (!ptid_equal (wait_ptid, minus_one_ptid)
2006 && !ptid_equal (inferior_ptid, wait_ptid))
2007 {
2008 struct regcache *regcache = get_thread_regcache (wait_ptid);
2009
2010 if (breakpoint_here_p (get_regcache_aspace (regcache),
2011 regcache_read_pc (regcache)))
2012 {
2013 /* If stepping, remember current thread to switch back to. */
2014 if (step)
2015 deferred_step_ptid = inferior_ptid;
2016
2017 /* Switch back to the WAIT_PTID thread. */
2018 switch_to_thread (wait_ptid);
2019
2020 if (debug_infrun)
2021 fprintf_unfiltered (gdb_stdlog,
2022 "infrun: prepare_to_proceed (step=%d), "
2023 "switched to [%s]\n",
2024 step, target_pid_to_str (inferior_ptid));
2025
2026 /* We return 1 to indicate that there is a breakpoint here,
2027 so we need to step over it before continuing to avoid
2028 hitting it straight away. */
2029 return 1;
2030 }
2031 }
2032
2033 return 0;
2034 }
2035
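/* Concrete scenario (annotation, not part of the original source) for
   the function above: thread T2 reports a SIGTRAP at one of our
   breakpoints, and the user then selects thread T1 and types
   "continue". With scheduler locking off, prepare_to_proceed switches
   back to T2 and returns 1, so that `proceed' below first steps T2
   off its breakpoint instead of immediately reporting the same hit
   again.  */
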
2036 /* Basic routine for continuing the program in various fashions.
2037
2038 ADDR is the address to resume at, or -1 for resume where stopped.
2039 SIGGNAL is the signal to give it, or 0 for none,
2040 or -1 for act according to how it stopped.
2041 STEP is nonzero if we should trap after one instruction.
2042 -1 means return after that and print nothing.
2043 You should probably set various step_... variables
2044 before calling here, if you are stepping.
2045
2046 You should call clear_proceed_status before calling proceed. */
2047
2048 void
2049 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2050 {
2051 struct regcache *regcache;
2052 struct gdbarch *gdbarch;
2053 struct thread_info *tp;
2054 CORE_ADDR pc;
2055 struct address_space *aspace;
2056 int oneproc = 0;
2057
2058 /* If we're stopped at a fork/vfork, follow the branch set by the
2059 "set follow-fork-mode" command; otherwise, we'll just proceed
2060 resuming the current thread. */
2061 if (!follow_fork ())
2062 {
2063 /* The target for some reason decided not to resume. */
2064 normal_stop ();
2065 if (target_can_async_p ())
2066 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2067 return;
2068 }
2069
2070 /* We'll update this if & when we switch to a new thread. */
2071 previous_inferior_ptid = inferior_ptid;
2072
2073 regcache = get_current_regcache ();
2074 gdbarch = get_regcache_arch (regcache);
2075 aspace = get_regcache_aspace (regcache);
2076 pc = regcache_read_pc (regcache);
2077
2078 if (step > 0)
2079 step_start_function = find_pc_function (pc);
2080 if (step < 0)
2081 stop_after_trap = 1;
2082
2083 if (addr == (CORE_ADDR) -1)
2084 {
2085 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2086 && execution_direction != EXEC_REVERSE)
2087 /* There is a breakpoint at the address we will resume at;
2088 step one instruction before inserting breakpoints so that
2089 we do not stop right away (and report a second hit at this
2090 breakpoint).
2091
2092 Note, we don't do this in reverse, because we won't
2093 actually be executing the breakpoint insn anyway.
2094 We'll be (un-)executing the previous instruction. */
2095
2096 oneproc = 1;
2097 else if (gdbarch_single_step_through_delay_p (gdbarch)
2098 && gdbarch_single_step_through_delay (gdbarch,
2099 get_current_frame ()))
2100 /* We stepped onto an instruction that needs to be stepped
2101 again before re-inserting the breakpoint, do so. */
2102 oneproc = 1;
2103 }
2104 else
2105 {
2106 regcache_write_pc (regcache, addr);
2107 }
2108
2109 if (debug_infrun)
2110 fprintf_unfiltered (gdb_stdlog,
2111 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2112 paddress (gdbarch, addr), siggnal, step);
2113
2114 if (non_stop)
2115 /* In non-stop, each thread is handled individually. The context
2116 must already be set to the right thread here. */
2117 ;
2118 else
2119 {
2120 /* In a multi-threaded task we may select another thread and
2121 then continue or step.
2122
2123 But if the old thread was stopped at a breakpoint, it will
2124 immediately cause another breakpoint stop without any
2125 execution (i.e. it will report a breakpoint hit incorrectly).
2126 So we must step over it first.
2127
2128 prepare_to_proceed checks the current thread against the
2129 thread that reported the most recent event. If a step-over
2130 is required it returns TRUE and sets the current thread to
2131 the old thread. */
2132 if (prepare_to_proceed (step))
2133 oneproc = 1;
2134 }
2135
2136 /* prepare_to_proceed may change the current thread. */
2137 tp = inferior_thread ();
2138
2139 if (oneproc)
2140 {
2141 tp->control.trap_expected = 1;
2142 /* If displaced stepping is enabled, we can step over the
2143 breakpoint without hitting it, so leave all breakpoints
2144 inserted. Otherwise we need to disable all breakpoints, step
2145 one instruction, and then re-add them when that step is
2146 finished. */
2147 if (!use_displaced_stepping (gdbarch))
2148 remove_breakpoints ();
2149 }
2150
2151 /* We can insert breakpoints if we're not trying to step over one,
2152 or if we are stepping over one but we're using displaced stepping
2153 to do so. */
2154 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2155 insert_breakpoints ();
2156
2157 if (!non_stop)
2158 {
2159 /* Pass the last stop signal to the thread we're resuming,
2160 irrespective of whether the current thread is the thread that
2161 got the last event or not. This was historically GDB's
2162 behaviour before keeping a stop_signal per thread. */
2163
2164 struct thread_info *last_thread;
2165 ptid_t last_ptid;
2166 struct target_waitstatus last_status;
2167
2168 get_last_target_status (&last_ptid, &last_status);
2169 if (!ptid_equal (inferior_ptid, last_ptid)
2170 && !ptid_equal (last_ptid, null_ptid)
2171 && !ptid_equal (last_ptid, minus_one_ptid))
2172 {
2173 last_thread = find_thread_ptid (last_ptid);
2174 if (last_thread)
2175 {
2176 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2177 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2178 }
2179 }
2180 }
2181
2182 if (siggnal != TARGET_SIGNAL_DEFAULT)
2183 tp->suspend.stop_signal = siggnal;
2184 /* If this signal should not be seen by program,
2185 give it zero. Used for debugging signals. */
2186 else if (!signal_program[tp->suspend.stop_signal])
2187 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2188
2189 annotate_starting ();
2190
2191 /* Make sure that output from GDB appears before output from the
2192 inferior. */
2193 gdb_flush (gdb_stdout);
2194
2195 /* Refresh prev_pc value just prior to resuming. This used to be
2196 done in stop_stepping, however, setting prev_pc there did not handle
2197 scenarios such as inferior function calls or returning from
2198 a function via the return command. In those cases, the prev_pc
2199 value was not set properly for subsequent commands. The prev_pc value
2200 is used to initialize the starting line number in the ecs. With an
2201 invalid value, the gdb next command ends up stopping at the position
2202 represented by the next line table entry past our start position.
2203 On platforms that generate one line table entry per line, this
2204 is not a problem. However, on the ia64, the compiler generates
2205 extraneous line table entries that do not increase the line number.
2206 When we issue the gdb next command on the ia64 after an inferior call
2207 or a return command, we often end up a few instructions forward, still
2208 within the original line we started in.
2209
2210 An attempt was made to refresh the prev_pc at the same time the
2211 execution_control_state is initialized (for instance, just before
2212 waiting for an inferior event). But this approach did not work
2213 because of platforms that use ptrace, where the pc register cannot
2214 be read unless the inferior is stopped. At that point, we are not
2215 guaranteed the inferior is stopped and so the regcache_read_pc() call
2216 can fail. Setting the prev_pc value here ensures the value is updated
2217 correctly when the inferior is stopped. */
2218 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2219
2220 /* Fill in with reasonable starting values. */
2221 init_thread_stepping_state (tp);
2222
2223 /* Reset to normal state. */
2224 init_infwait_state ();
2225
2226 /* Resume inferior. */
2227 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2228
2229 /* Wait for it to stop (if not standalone)
2230 and in any case decode why it stopped, and act accordingly. */
2231 /* Do this only if we are not using the event loop, or if the target
2232 does not support asynchronous execution. */
2233 if (!target_can_async_p ())
2234 {
2235 wait_for_inferior ();
2236 normal_stop ();
2237 }
2238 }
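
/* Illustrative sketch (not part of the original source) of the
   calling convention documented above, for a plain "resume where
   stopped" continuation that keeps the thread's existing stop signal
   and does not single-step; the concrete call sites live outside this
   file and are assumed here:

       clear_proceed_status ();
       proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);  */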
2239 \f
2240
2241 /* Start remote-debugging of a machine over a serial link. */
2242
2243 void
2244 start_remote (int from_tty)
2245 {
2246 struct inferior *inferior;
2247
2248 init_wait_for_inferior ();
2249 inferior = current_inferior ();
2250 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2251
2252 /* Always go on waiting for the target, regardless of the mode. */
2253 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2254 indicate to wait_for_inferior that a target should timeout if
2255 nothing is returned (instead of just blocking). Because of this,
2256 targets expecting an immediate response need to, internally, set
2257 things up so that the target_wait() is forced to eventually
2258 timeout. */
2259 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2260 differentiate to its caller what the state of the target is after
2261 the initial open has been performed. Here we're assuming that
2262 the target has stopped. It should be possible to eventually have
2263 target_open() return to the caller an indication that the target
2264 is currently running and GDB state should be set to the same as
2265 for an async run. */
2266 wait_for_inferior ();
2267
2268 /* Now that the inferior has stopped, do any bookkeeping like
2269 loading shared libraries. We want to do this before normal_stop,
2270 so that the displayed frame is up to date. */
2271 post_create_inferior (&current_target, from_tty);
2272
2273 normal_stop ();
2274 }
2275
2276 /* Initialize static vars when a new inferior begins. */
2277
2278 void
2279 init_wait_for_inferior (void)
2280 {
2281 /* These are meaningless until the first time through wait_for_inferior. */
2282
2283 breakpoint_init_inferior (inf_starting);
2284
2285 clear_proceed_status ();
2286
2287 stepping_past_singlestep_breakpoint = 0;
2288 deferred_step_ptid = null_ptid;
2289
2290 target_last_wait_ptid = minus_one_ptid;
2291
2292 previous_inferior_ptid = inferior_ptid;
2293 init_infwait_state ();
2294
2295 /* Discard any skipped inlined frames. */
2296 clear_inline_frame_state (minus_one_ptid);
2297 }
2298
2299 \f
2300 /* This enum encodes possible reasons for doing a target_wait, so that
2301 wfi can call target_wait in one place. (Ultimately the call will be
2302 moved out of the infinite loop entirely.) */
2303
2304 enum infwait_states
2305 {
2306 infwait_normal_state,
2307 infwait_thread_hop_state,
2308 infwait_step_watch_state,
2309 infwait_nonstep_watch_state
2310 };
2311
2312 /* The PTID we'll do a target_wait on. */
2313 ptid_t waiton_ptid;
2314
2315 /* Current inferior wait state. */
2316 enum infwait_states infwait_state;
2317
2318 /* Data to be passed around while handling an event. This data is
2319 discarded between events. */
2320 struct execution_control_state
2321 {
2322 ptid_t ptid;
2323 /* The thread that got the event, if this was a thread event; NULL
2324 otherwise. */
2325 struct thread_info *event_thread;
2326
2327 struct target_waitstatus ws;
2328 int random_signal;
2329 CORE_ADDR stop_func_start;
2330 CORE_ADDR stop_func_end;
2331 char *stop_func_name;
2332 int new_thread_event;
2333 int wait_some_more;
2334 };
2335
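/* Lifecycle sketch (annotation, not part of the original source),
   mirroring how wait_for_inferior and fetch_inferior_event below use
   this structure: zero it, fill it in from target_wait, hand it to
   handle_inferior_event, and keep waiting while wait_some_more is set:

       struct execution_control_state ecss;
       struct execution_control_state *ecs = &ecss;

       memset (ecs, 0, sizeof (*ecs));
       ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
       handle_inferior_event (ecs);
       if (ecs->wait_some_more)
         ... wait for the next event ...  */
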
2336 static void handle_inferior_event (struct execution_control_state *ecs);
2337
2338 static void handle_step_into_function (struct gdbarch *gdbarch,
2339 struct execution_control_state *ecs);
2340 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2341 struct execution_control_state *ecs);
2342 static void check_exception_resume (struct execution_control_state *,
2343 struct frame_info *, struct symbol *);
2344
2345 static void stop_stepping (struct execution_control_state *ecs);
2346 static void prepare_to_wait (struct execution_control_state *ecs);
2347 static void keep_going (struct execution_control_state *ecs);
2348
2349 /* Callback for iterate_over_threads. If the thread is stopped, but
2350 the user/frontend doesn't know about that yet, go through
2351 normal_stop, as if the thread had just stopped now. ARG points at
2352 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2353 ptid_is_pid(PTID) is true, applies to all threads of the process
2354 pointed at by PTID. Otherwise, applies only to the thread pointed to by
2355 PTID. */
2356
2357 static int
2358 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2359 {
2360 ptid_t ptid = * (ptid_t *) arg;
2361
2362 if ((ptid_equal (info->ptid, ptid)
2363 || ptid_equal (minus_one_ptid, ptid)
2364 || (ptid_is_pid (ptid)
2365 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2366 && is_running (info->ptid)
2367 && !is_executing (info->ptid))
2368 {
2369 struct cleanup *old_chain;
2370 struct execution_control_state ecss;
2371 struct execution_control_state *ecs = &ecss;
2372
2373 memset (ecs, 0, sizeof (*ecs));
2374
2375 old_chain = make_cleanup_restore_current_thread ();
2376
2377 switch_to_thread (info->ptid);
2378
2379 /* Go through handle_inferior_event/normal_stop, so we always
2380 have consistent output as if the stop event had been
2381 reported. */
2382 ecs->ptid = info->ptid;
2383 ecs->event_thread = find_thread_ptid (info->ptid);
2384 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2385 ecs->ws.value.sig = TARGET_SIGNAL_0;
2386
2387 handle_inferior_event (ecs);
2388
2389 if (!ecs->wait_some_more)
2390 {
2391 struct thread_info *tp;
2392
2393 normal_stop ();
2394
2395 /* Finish off the continuations. The continuations
2396 themselves are responsible for realising the thread
2397 didn't finish what it was supposed to do. */
2398 tp = inferior_thread ();
2399 do_all_intermediate_continuations_thread (tp);
2400 do_all_continuations_thread (tp);
2401 }
2402
2403 do_cleanups (old_chain);
2404 }
2405
2406 return 0;
2407 }
2408
2409 /* This function is attached as a "thread_stop_requested" observer.
2410 Cleanup local state that assumed the PTID was to be resumed, and
2411 report the stop to the frontend. */
2412
2413 static void
2414 infrun_thread_stop_requested (ptid_t ptid)
2415 {
2416 struct displaced_step_inferior_state *displaced;
2417
2418 /* PTID was requested to stop. Remove it from the displaced
2419 stepping queue, so we don't try to resume it automatically. */
2420
2421 for (displaced = displaced_step_inferior_states;
2422 displaced;
2423 displaced = displaced->next)
2424 {
2425 struct displaced_step_request *it, **prev_next_p;
2426
2427 it = displaced->step_request_queue;
2428 prev_next_p = &displaced->step_request_queue;
2429 while (it)
2430 {
2431 if (ptid_match (it->ptid, ptid))
2432 {
2433 *prev_next_p = it->next;
2434 it->next = NULL;
2435 xfree (it);
2436 }
2437 else
2438 {
2439 prev_next_p = &it->next;
2440 }
2441
2442 it = *prev_next_p;
2443 }
2444 }
2445
2446 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2447 }
2448
2449 static void
2450 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2451 {
2452 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2453 nullify_last_target_wait_ptid ();
2454 }
2455
2456 /* Callback for iterate_over_threads. */
2457
2458 static int
2459 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2460 {
2461 if (is_exited (info->ptid))
2462 return 0;
2463
2464 delete_step_resume_breakpoint (info);
2465 delete_exception_resume_breakpoint (info);
2466 return 0;
2467 }
2468
2469 /* In all-stop, delete the step resume breakpoint of any thread that
2470 had one. In non-stop, delete the step resume breakpoint of the
2471 thread that just stopped. */
2472
2473 static void
2474 delete_step_thread_step_resume_breakpoint (void)
2475 {
2476 if (!target_has_execution
2477 || ptid_equal (inferior_ptid, null_ptid))
2478 /* If the inferior has exited, we have already deleted the step
2479 resume breakpoints out of GDB's lists. */
2480 return;
2481
2482 if (non_stop)
2483 {
2484 /* If in non-stop mode, only delete the step-resume or
2485 longjmp-resume breakpoint of the thread that just stopped
2486 stepping. */
2487 struct thread_info *tp = inferior_thread ();
2488
2489 delete_step_resume_breakpoint (tp);
2490 delete_exception_resume_breakpoint (tp);
2491 }
2492 else
2493 /* In all-stop mode, delete all step-resume and longjmp-resume
2494 breakpoints of any thread that had them. */
2495 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2496 }
2497
2498 /* A cleanup wrapper. */
2499
2500 static void
2501 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2502 {
2503 delete_step_thread_step_resume_breakpoint ();
2504 }
2505
2506 /* Pretty print the results of target_wait, for debugging purposes. */
2507
2508 static void
2509 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2510 const struct target_waitstatus *ws)
2511 {
2512 char *status_string = target_waitstatus_to_string (ws);
2513 struct ui_file *tmp_stream = mem_fileopen ();
2514 char *text;
2515
2516 /* The text is split over several lines because it was getting too long.
2517 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2518 output as a unit; we want only one timestamp printed if debug_timestamp
2519 is set. */
2520
2521 fprintf_unfiltered (tmp_stream,
2522 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2523 if (PIDGET (waiton_ptid) != -1)
2524 fprintf_unfiltered (tmp_stream,
2525 " [%s]", target_pid_to_str (waiton_ptid));
2526 fprintf_unfiltered (tmp_stream, ", status) =\n");
2527 fprintf_unfiltered (tmp_stream,
2528 "infrun: %d [%s],\n",
2529 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2530 fprintf_unfiltered (tmp_stream,
2531 "infrun: %s\n",
2532 status_string);
2533
2534 text = ui_file_xstrdup (tmp_stream, NULL);
2535
2536 /* This uses %s in part to handle %'s in the text, but also to avoid
2537 a gcc error: the format attribute requires a string literal. */
2538 fprintf_unfiltered (gdb_stdlog, "%s", text);
2539
2540 xfree (status_string);
2541 xfree (text);
2542 ui_file_delete (tmp_stream);
2543 }
2544
2545 /* Prepare and stabilize the inferior for detaching it. E.g.,
2546 detaching while a thread is displaced stepping is a recipe for
2547 crashing it, as nothing would readjust the PC out of the scratch
2548 pad. */
2549
2550 void
2551 prepare_for_detach (void)
2552 {
2553 struct inferior *inf = current_inferior ();
2554 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2555 struct cleanup *old_chain_1;
2556 struct displaced_step_inferior_state *displaced;
2557
2558 displaced = get_displaced_stepping_state (inf->pid);
2559
2560 /* Is any thread of this process displaced stepping? If not,
2561 there's nothing else to do. */
2562 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2563 return;
2564
2565 if (debug_infrun)
2566 fprintf_unfiltered (gdb_stdlog,
2567 "displaced-stepping in progress while detaching");
2568
2569 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2570 inf->detaching = 1;
2571
2572 while (!ptid_equal (displaced->step_ptid, null_ptid))
2573 {
2574 struct cleanup *old_chain_2;
2575 struct execution_control_state ecss;
2576 struct execution_control_state *ecs;
2577
2578 ecs = &ecss;
2579 memset (ecs, 0, sizeof (*ecs));
2580
2581 overlay_cache_invalid = 1;
2582
2583 /* We have to invalidate the registers BEFORE calling
2584 target_wait because they can be loaded from the target while
2585 in target_wait. This makes remote debugging a bit more
2586 efficient for those targets that provide critical registers
2587 as part of their normal status mechanism. */
2588
2589 registers_changed ();
2590
2591 if (deprecated_target_wait_hook)
2592 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2593 else
2594 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2595
2596 if (debug_infrun)
2597 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2598
2599 /* If an error happens while handling the event, propagate GDB's
2600 knowledge of the executing state to the frontend/user running
2601 state. */
2602 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2603 &minus_one_ptid);
2604
2605 /* In non-stop mode, each thread is handled individually.
2606 Switch early, so the global state is set correctly for this
2607 thread. */
2608 if (non_stop
2609 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2610 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2611 context_switch (ecs->ptid);
2612
2613 /* Now figure out what to do with the result of the wait. */
2614 handle_inferior_event (ecs);
2615
2616 /* No error, don't finish the state yet. */
2617 discard_cleanups (old_chain_2);
2618
2619 /* Breakpoints and watchpoints are not installed on the target
2620 at this point, and signals are passed directly to the
2621 inferior, so this must mean the process is gone. */
2622 if (!ecs->wait_some_more)
2623 {
2624 discard_cleanups (old_chain_1);
2625 error (_("Program exited while detaching"));
2626 }
2627 }
2628
2629 discard_cleanups (old_chain_1);
2630 }
2631
2632 /* Wait for control to return from inferior to debugger.
2633
2634 If inferior gets a signal, we may decide to start it up again
2635 instead of returning. That is why there is a loop in this function.
2636 When this function actually returns it means the inferior
2637 should be left stopped and GDB should read more commands. */
2638
2639 void
2640 wait_for_inferior (void)
2641 {
2642 struct cleanup *old_cleanups;
2643 struct execution_control_state ecss;
2644 struct execution_control_state *ecs;
2645
2646 if (debug_infrun)
2647 fprintf_unfiltered
2648 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2649
2650 old_cleanups =
2651 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2652
2653 ecs = &ecss;
2654 memset (ecs, 0, sizeof (*ecs));
2655
2656 while (1)
2657 {
2658 struct cleanup *old_chain;
2659
2660 /* We have to invalidate the registers BEFORE calling target_wait
2661 because they can be loaded from the target while in target_wait.
2662 This makes remote debugging a bit more efficient for those
2663 targets that provide critical registers as part of their normal
2664 status mechanism. */
2665
2666 overlay_cache_invalid = 1;
2667 registers_changed ();
2668
2669 if (deprecated_target_wait_hook)
2670 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2671 else
2672 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2673
2674 if (debug_infrun)
2675 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2676
2677 /* If an error happens while handling the event, propagate GDB's
2678 knowledge of the executing state to the frontend/user running
2679 state. */
2680 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2681
2682 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2683 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2684 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2685
2686 /* Now figure out what to do with the result of the wait. */
2687 handle_inferior_event (ecs);
2688
2689 /* No error, don't finish the state yet. */
2690 discard_cleanups (old_chain);
2691
2692 if (!ecs->wait_some_more)
2693 break;
2694 }
2695
2696 do_cleanups (old_cleanups);
2697 }
2698
2699 /* Asynchronous version of wait_for_inferior. It is called by the
2700 event loop whenever a change of state is detected on the file
2701 descriptor corresponding to the target. It can be called more than
2702 once to complete a single execution command. In such cases we need
2703 to keep the state in a global variable ECSS. If it is the last time
2704 that this function is called for a single execution command, then
2705 report to the user that the inferior has stopped, and do the
2706 necessary cleanups. */
2707
2708 void
2709 fetch_inferior_event (void *client_data)
2710 {
2711 struct execution_control_state ecss;
2712 struct execution_control_state *ecs = &ecss;
2713 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2714 struct cleanup *ts_old_chain;
2715 int was_sync = sync_execution;
2716
2717 memset (ecs, 0, sizeof (*ecs));
2718
2719 /* We're handling a live event, so make sure we're doing live
2720 debugging. If we're looking at traceframes while the target is
2721 running, we're going to need to get back to that mode after
2722 handling the event. */
2723 if (non_stop)
2724 {
2725 make_cleanup_restore_current_traceframe ();
2726 set_current_traceframe (-1);
2727 }
2728
2729 if (non_stop)
2730 /* In non-stop mode, the user/frontend should not notice a thread
2731 switch due to internal events. Make sure we revert to the
2732 user-selected thread and frame after handling the event and
2733 running any breakpoint commands. */
2734 make_cleanup_restore_current_thread ();
2735
2736 /* We have to invalidate the registers BEFORE calling target_wait
2737 because they can be loaded from the target while in target_wait.
2738 This makes remote debugging a bit more efficient for those
2739 targets that provide critical registers as part of their normal
2740 status mechanism. */
2741
2742 overlay_cache_invalid = 1;
2743 registers_changed ();
2744
2745 make_cleanup_restore_integer (&execution_direction);
2746 execution_direction = target_execution_direction ();
2747
2748 if (deprecated_target_wait_hook)
2749 ecs->ptid =
2750 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2751 else
2752 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2753
2754 if (debug_infrun)
2755 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2756
2757 if (non_stop
2758 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2759 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2760 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2761 /* In non-stop mode, each thread is handled individually. Switch
2762 early, so the global state is set correctly for this
2763 thread. */
2764 context_switch (ecs->ptid);
2765
2766 /* If an error happens while handling the event, propagate GDB's
2767 knowledge of the executing state to the frontend/user running
2768 state. */
2769 if (!non_stop)
2770 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2771 else
2772 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2773
2774 /* Now figure out what to do with the result of the wait. */
2775 handle_inferior_event (ecs);
2776
2777 if (!ecs->wait_some_more)
2778 {
2779 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2780
2781 delete_step_thread_step_resume_breakpoint ();
2782
2783 /* We may not find an inferior if this was a process exit. */
2784 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2785 normal_stop ();
2786
2787 if (target_has_execution
2788 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2789 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2790 && ecs->event_thread->step_multi
2791 && ecs->event_thread->control.stop_step)
2792 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2793 else
2794 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2795 }
2796
2797 /* No error, don't finish the thread states yet. */
2798 discard_cleanups (ts_old_chain);
2799
2800 /* Revert thread and frame. */
2801 do_cleanups (old_chain);
2802
2803 /* If the inferior was in sync execution mode, and now isn't,
2804 restore the prompt. */
2805 if (was_sync && !sync_execution)
2806 display_gdb_prompt (0);
2807 }
2808
2809 /* Record the frame and location we're currently stepping through. */
2810 void
2811 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2812 {
2813 struct thread_info *tp = inferior_thread ();
2814
2815 tp->control.step_frame_id = get_frame_id (frame);
2816 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2817
2818 tp->current_symtab = sal.symtab;
2819 tp->current_line = sal.line;
2820 }
2821
2822 /* Clear context switchable stepping state. */
2823
2824 void
2825 init_thread_stepping_state (struct thread_info *tss)
2826 {
2827 tss->stepping_over_breakpoint = 0;
2828 tss->step_after_step_resume_breakpoint = 0;
2829 tss->stepping_through_solib_after_catch = 0;
2830 tss->stepping_through_solib_catchpoints = NULL;
2831 }
2832
2833 /* Return the cached copy of the last pid/waitstatus returned by
2834 target_wait()/deprecated_target_wait_hook(). The data is actually
2835 cached by handle_inferior_event(), which gets called immediately
2836 after target_wait()/deprecated_target_wait_hook(). */
2837
2838 void
2839 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2840 {
2841 *ptidp = target_last_wait_ptid;
2842 *status = target_last_waitstatus;
2843 }
2844
2845 void
2846 nullify_last_target_wait_ptid (void)
2847 {
2848 target_last_wait_ptid = minus_one_ptid;
2849 }
2850
2851 /* Switch thread contexts. */
2852
2853 static void
2854 context_switch (ptid_t ptid)
2855 {
2856 if (debug_infrun)
2857 {
2858 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2859 target_pid_to_str (inferior_ptid));
2860 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2861 target_pid_to_str (ptid));
2862 }
2863
2864 switch_to_thread (ptid);
2865 }
2866
2867 static void
2868 adjust_pc_after_break (struct execution_control_state *ecs)
2869 {
2870 struct regcache *regcache;
2871 struct gdbarch *gdbarch;
2872 struct address_space *aspace;
2873 CORE_ADDR breakpoint_pc;
2874
2875 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2876 we aren't, just return.
2877
2878 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2879 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2880 implemented by software breakpoints should be handled through the normal
2881 breakpoint layer.
2882
2883 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2884 different signals (SIGILL or SIGEMT for instance), but it is less
2885 clear where the PC is pointing afterwards. It may not match
2886 gdbarch_decr_pc_after_break. I don't know any specific target that
2887 generates these signals at breakpoints (the code has been in GDB since at
2888 least 1992) so I cannot guess how to handle them here.
2889
2890 In earlier versions of GDB, a target with
2891 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2892 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2893 target with both of these set in GDB history, and it seems unlikely to be
2894 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2895
2896 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2897 return;
2898
2899 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2900 return;
2901
2902 /* In reverse execution, when a breakpoint is hit, the instruction
2903 under it has already been de-executed. The reported PC always
2904 points at the breakpoint address, so adjusting it further would
2905 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2906 architecture:
2907
2908 B1 0x08000000 : INSN1
2909 B2 0x08000001 : INSN2
2910 0x08000002 : INSN3
2911 PC -> 0x08000003 : INSN4
2912
2913 Say you're stopped at 0x08000003 as above. Reverse continuing
2914 from that point should hit B2 as below. Reading the PC when the
2915 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2916 been de-executed already.
2917
2918 B1 0x08000000 : INSN1
2919 B2 PC -> 0x08000001 : INSN2
2920 0x08000002 : INSN3
2921 0x08000003 : INSN4
2922
2923 We can't apply the same logic as for forward execution, because
2924 we would wrongly adjust the PC to 0x08000000, since there's a
2925 breakpoint at PC - 1. We'd then report a hit on B1, although
2926 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2927 behaviour. */
2928 if (execution_direction == EXEC_REVERSE)
2929 return;
2930
2931 /* If this target does not decrement the PC after breakpoints, then
2932 we have nothing to do. */
2933 regcache = get_thread_regcache (ecs->ptid);
2934 gdbarch = get_regcache_arch (regcache);
2935 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2936 return;
2937
2938 aspace = get_regcache_aspace (regcache);
2939
2940 /* Find the location where (if we've hit a breakpoint) the
2941 breakpoint would be. */
2942 breakpoint_pc = regcache_read_pc (regcache)
2943 - gdbarch_decr_pc_after_break (gdbarch);
2944
2945 /* Check whether there actually is a software breakpoint inserted at
2946 that location.
2947
2948 If in non-stop mode, a race condition is possible where we've
2949 removed a breakpoint, but stop events for that breakpoint were
2950 already queued and arrive later. To suppress those spurious
2951 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2952 and retire them after a number of stop events are reported. */
2953 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2954 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2955 {
2956 struct cleanup *old_cleanups = NULL;
2957
2958 if (RECORD_IS_USED)
2959 old_cleanups = record_gdb_operation_disable_set ();
2960
2961 /* When using hardware single-step, a SIGTRAP is reported for both
2962 a completed single-step and a software breakpoint. We need to
2963 differentiate between the two, as the latter needs adjusting
2964 but the former does not.
2965
2966 The SIGTRAP can be due to a completed hardware single-step only if
2967 - we didn't insert software single-step breakpoints
2968 - the thread to be examined is still the current thread
2969 - this thread is currently being stepped
2970
2971 If any of these events did not occur, we must have stopped due
2972 to hitting a software breakpoint, and have to back up to the
2973 breakpoint address.
2974
2975 As a special case, we could have hardware single-stepped a
2976 software breakpoint. In this case (prev_pc == breakpoint_pc),
2977 we also need to back up to the breakpoint address. */
2978
2979 if (singlestep_breakpoints_inserted_p
2980 || !ptid_equal (ecs->ptid, inferior_ptid)
2981 || !currently_stepping (ecs->event_thread)
2982 || ecs->event_thread->prev_pc == breakpoint_pc)
2983 regcache_write_pc (regcache, breakpoint_pc);
2984
2985 if (RECORD_IS_USED)
2986 do_cleanups (old_cleanups);
2987 }
2988 }
2989
2990 void
2991 init_infwait_state (void)
2992 {
2993 waiton_ptid = pid_to_ptid (-1);
2994 infwait_state = infwait_normal_state;
2995 }
2996
2997 void
2998 error_is_running (void)
2999 {
3000 error (_("Cannot execute this command while "
3001 "the selected thread is running."));
3002 }
3003
3004 void
3005 ensure_not_running (void)
3006 {
3007 if (is_running (inferior_ptid))
3008 error_is_running ();
3009 }
3010
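/* Added annotation (the original code has no comment here): return
   nonzero if the frame with id STEP_FRAME_ID is outer to FRAME with
   only inline frames in between, i.e. FRAME was apparently reached by
   stepping into an inlined function from that frame.  */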
3011 static int
3012 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3013 {
3014 for (frame = get_prev_frame (frame);
3015 frame != NULL;
3016 frame = get_prev_frame (frame))
3017 {
3018 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3019 return 1;
3020 if (get_frame_type (frame) != INLINE_FRAME)
3021 break;
3022 }
3023
3024 return 0;
3025 }
3026
3027 /* Auxiliary function that handles syscall entry/return events.
3028 It returns 1 if the inferior should keep going (and GDB
3029 should ignore the event), or 0 if the event deserves to be
3030 processed. */
3031
3032 static int
3033 handle_syscall_event (struct execution_control_state *ecs)
3034 {
3035 struct regcache *regcache;
3036 struct gdbarch *gdbarch;
3037 int syscall_number;
3038
3039 if (!ptid_equal (ecs->ptid, inferior_ptid))
3040 context_switch (ecs->ptid);
3041
3042 regcache = get_thread_regcache (ecs->ptid);
3043 gdbarch = get_regcache_arch (regcache);
3044 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3045 stop_pc = regcache_read_pc (regcache);
3046
3047 target_last_waitstatus.value.syscall_number = syscall_number;
3048
3049 if (catch_syscall_enabled () > 0
3050 && catching_syscall_number (syscall_number) > 0)
3051 {
3052 if (debug_infrun)
3053 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3054 syscall_number);
3055
3056 ecs->event_thread->control.stop_bpstat
3057 = bpstat_stop_status (get_regcache_aspace (regcache),
3058 stop_pc, ecs->ptid);
3059 ecs->random_signal
3060 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3061
3062 if (!ecs->random_signal)
3063 {
3064 /* Catchpoint hit. */
3065 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3066 return 0;
3067 }
3068 }
3069
3070 /* If no catchpoint triggered for this, then keep going. */
3071 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3072 keep_going (ecs);
3073 return 1;
3074 }
3075
3076 /* Given an execution control state that has been freshly filled in
3077 by an event from the inferior, figure out what it means and take
3078 appropriate action. */
3079
3080 static void
3081 handle_inferior_event (struct execution_control_state *ecs)
3082 {
3083 struct frame_info *frame;
3084 struct gdbarch *gdbarch;
3085 int sw_single_step_trap_p = 0;
3086 int stopped_by_watchpoint;
3087 int stepped_after_stopped_by_watchpoint = 0;
3088 struct symtab_and_line stop_pc_sal;
3089 enum stop_kind stop_soon;
3090
3091 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3092 {
3093 /* We had an event in the inferior, but we are not interested in
3094 handling it at this level. The lower layers have already
3095 done what needs to be done, if anything.
3096
3097 One of the possible circumstances for this is when the
3098 inferior produces output for the console. The inferior has
3099 not stopped, and we are ignoring the event. Another possible
3100 circumstance is any event which the lower level knows will be
3101 reported multiple times without an intervening resume. */
3102 if (debug_infrun)
3103 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3104 prepare_to_wait (ecs);
3105 return;
3106 }
3107
3108 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3109 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3110 {
3111 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3112
3113 gdb_assert (inf);
3114 stop_soon = inf->control.stop_soon;
3115 }
3116 else
3117 stop_soon = NO_STOP_QUIETLY;
3118
3119 /* Cache the last pid/waitstatus. */
3120 target_last_wait_ptid = ecs->ptid;
3121 target_last_waitstatus = ecs->ws;
3122
3123 /* Always clear state belonging to the previous time we stopped. */
3124 stop_stack_dummy = STOP_NONE;
3125
3126 /* If it's a new process, add it to the thread database. */
3127
3128 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3129 && !ptid_equal (ecs->ptid, minus_one_ptid)
3130 && !in_thread_list (ecs->ptid));
3131
3132 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3133 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3134 add_thread (ecs->ptid);
3135
3136 ecs->event_thread = find_thread_ptid (ecs->ptid);
3137
3138 /* Dependent on valid ECS->EVENT_THREAD. */
3139 adjust_pc_after_break (ecs);
3140
3141 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3142 reinit_frame_cache ();
3143
3144 breakpoint_retire_moribund ();
3145
3146 /* First, distinguish signals caused by the debugger from signals
3147 that have to do with the program's own actions. Note that
3148 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3149 on the operating system version. Here we detect when a SIGILL or
3150 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3151 something similar for SIGSEGV, since a SIGSEGV will be generated
3152 when we're trying to execute a breakpoint instruction on a
3153 non-executable stack. This happens for call dummy breakpoints
3154 for architectures like SPARC that place call dummies on the
3155 stack. */
3156 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3157 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3158 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3159 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3160 {
3161 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3162
3163 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3164 regcache_read_pc (regcache)))
3165 {
3166 if (debug_infrun)
3167 fprintf_unfiltered (gdb_stdlog,
3168 "infrun: Treating signal as SIGTRAP\n");
3169 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3170 }
3171 }
3172
3173 /* Mark the non-executing threads accordingly. In all-stop, all
3174 threads of all processes are stopped when we get any event
3175 reported. In non-stop mode, only the event thread stops. If
3176 we're handling a process exit in non-stop mode, there's nothing
3177 to do, as threads of the dead process are gone, and threads of
3178 any other process were left running. */
3179 if (!non_stop)
3180 set_executing (minus_one_ptid, 0);
3181 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3182 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3183 set_executing (inferior_ptid, 0);
3184
3185 switch (infwait_state)
3186 {
3187 case infwait_thread_hop_state:
3188 if (debug_infrun)
3189 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3190 break;
3191
3192 case infwait_normal_state:
3193 if (debug_infrun)
3194 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3195 break;
3196
3197 case infwait_step_watch_state:
3198 if (debug_infrun)
3199 fprintf_unfiltered (gdb_stdlog,
3200 "infrun: infwait_step_watch_state\n");
3201
3202 stepped_after_stopped_by_watchpoint = 1;
3203 break;
3204
3205 case infwait_nonstep_watch_state:
3206 if (debug_infrun)
3207 fprintf_unfiltered (gdb_stdlog,
3208 "infrun: infwait_nonstep_watch_state\n");
3209 insert_breakpoints ();
3210
3211 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3212 handle things like signals arriving and other things happening
3213 in combination correctly? */
3214 stepped_after_stopped_by_watchpoint = 1;
3215 break;
3216
3217 default:
3218 internal_error (__FILE__, __LINE__, _("bad switch"));
3219 }
3220
3221 infwait_state = infwait_normal_state;
3222 waiton_ptid = pid_to_ptid (-1);
3223
3224 switch (ecs->ws.kind)
3225 {
3226 case TARGET_WAITKIND_LOADED:
3227 if (debug_infrun)
3228 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3229 /* Ignore gracefully during startup of the inferior, as it might
3230 be the shell which has just loaded some objects; otherwise,
3231 add the symbols for the newly loaded objects. Also ignore at
3232 the beginning of an attach or remote session; we will query
3233 the full list of libraries once the connection is
3234 established. */
3235 if (stop_soon == NO_STOP_QUIETLY)
3236 {
3237 /* Check for any newly added shared libraries if we're
3238 supposed to be adding them automatically. Switch
3239 terminal for any messages produced by
3240 breakpoint_re_set. */
3241 target_terminal_ours_for_output ();
3242 /* NOTE: cagney/2003-11-25: Make certain that the target
3243 stack's section table is kept up-to-date. Architectures,
3244 (e.g., PPC64), use the section table to perform
3245 operations such as address => section name and hence
3246 require the table to contain all sections (including
3247 those found in shared libraries). */
3248 #ifdef SOLIB_ADD
3249 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3250 #else
3251 solib_add (NULL, 0, &current_target, auto_solib_add);
3252 #endif
3253 target_terminal_inferior ();
3254
3255 /* If requested, stop when the dynamic linker notifies
3256 gdb of events. This allows the user to get control
3257 and place breakpoints in initializer routines for
3258 dynamically loaded objects (among other things). */
3259 if (stop_on_solib_events)
3260 {
3261 /* Make sure we print "Stopped due to solib-event" in
3262 normal_stop. */
3263 stop_print_frame = 1;
3264
3265 stop_stepping (ecs);
3266 return;
3267 }
3268
3269 /* NOTE drow/2007-05-11: This might be a good place to check
3270 for "catch load". */
3271 }
3272
3273 /* If we are skipping through a shell, or through shared library
3274 loading that we aren't interested in, resume the program. If
3275 we're running the program normally, also resume. But stop if
3276 we're attaching or setting up a remote connection. */
3277 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3278 {
3279 /* Loading of shared libraries might have changed breakpoint
3280 addresses. Make sure new breakpoints are inserted. */
3281 if (stop_soon == NO_STOP_QUIETLY
3282 && !breakpoints_always_inserted_mode ())
3283 insert_breakpoints ();
3284 resume (0, TARGET_SIGNAL_0);
3285 prepare_to_wait (ecs);
3286 return;
3287 }
3288
3289 break;
3290
3291 case TARGET_WAITKIND_SPURIOUS:
3292 if (debug_infrun)
3293 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3294 resume (0, TARGET_SIGNAL_0);
3295 prepare_to_wait (ecs);
3296 return;
3297
3298 case TARGET_WAITKIND_EXITED:
3299 if (debug_infrun)
3300 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3301 inferior_ptid = ecs->ptid;
3302 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3303 set_current_program_space (current_inferior ()->pspace);
3304 handle_vfork_child_exec_or_exit (0);
3305 target_terminal_ours (); /* Must do this before mourn anyway. */
3306 print_exited_reason (ecs->ws.value.integer);
3307
3308 /* Record the exit code in the convenience variable $_exitcode, so
3309 that the user can inspect this again later. */
3310 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3311 (LONGEST) ecs->ws.value.integer);
3312 gdb_flush (gdb_stdout);
3313 target_mourn_inferior ();
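/* The inferior has exited; just discard the bookkeeping for any
   software single-step breakpoints rather than trying to remove them
   from a process that is gone.  */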
3314 singlestep_breakpoints_inserted_p = 0;
3315 cancel_single_step_breakpoints ();
3316 stop_print_frame = 0;
3317 stop_stepping (ecs);
3318 return;
3319
3320 case TARGET_WAITKIND_SIGNALLED:
3321 if (debug_infrun)
3322 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3323 inferior_ptid = ecs->ptid;
3324 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3325 set_current_program_space (current_inferior ()->pspace);
3326 handle_vfork_child_exec_or_exit (0);
3327 stop_print_frame = 0;
3328 target_terminal_ours (); /* Must do this before mourn anyway. */
3329
3330 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3331 reach here unless the inferior is dead. However, for years
3332 target_kill() was called here, which hints that fatal signals aren't
3333 really fatal on some systems. If that's true, then some changes
3334 may be needed. */
3335 target_mourn_inferior ();
3336
3337 print_signal_exited_reason (ecs->ws.value.sig);
3338 singlestep_breakpoints_inserted_p = 0;
3339 cancel_single_step_breakpoints ();
3340 stop_stepping (ecs);
3341 return;
3342
3343 /* The following are the only cases in which we keep going;
3344 the above cases end in a continue or goto. */
3345 case TARGET_WAITKIND_FORKED:
3346 case TARGET_WAITKIND_VFORKED:
3347 if (debug_infrun)
3348 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3349
3350 if (!ptid_equal (ecs->ptid, inferior_ptid))
3351 {
3352 context_switch (ecs->ptid);
3353 reinit_frame_cache ();
3354 }
3355
3356 /* Immediately detach breakpoints from the child before there's
3357 any chance of letting the user delete breakpoints from the
3358 breakpoint lists. If we don't do this early, it's easy to
3359 leave leftover traps in the child, viz. "break foo; catch
3360 fork; c; <fork>; del; c; <child calls foo>". We only follow
3361 the fork on the last `continue', and by that time the
3362 breakpoint at "foo" is long gone from the breakpoint table.
3363 If we vforked, then we don't need to unpatch here, since both
3364 parent and child are sharing the same memory pages; we'll
3365 need to unpatch at follow/detach time instead to be certain
3366 that new breakpoints added between catchpoint hit time and
3367 vfork follow are detached. */
3368 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3369 {
3370 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3371
3372 /* This won't actually modify the breakpoint list, but will
3373 physically remove the breakpoints from the child. */
3374 detach_breakpoints (child_pid);
3375 }
3376
3377 if (singlestep_breakpoints_inserted_p)
3378 {
3379 /* Pull the single step breakpoints out of the target. */
3380 remove_single_step_breakpoints ();
3381 singlestep_breakpoints_inserted_p = 0;
3382 }
3383
3384 /* In case the event is caught by a catchpoint, remember that
3385 the event is to be followed at the next resume of the thread,
3386 and not immediately. */
3387 ecs->event_thread->pending_follow = ecs->ws;
3388
3389 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3390
3391 ecs->event_thread->control.stop_bpstat
3392 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3393 stop_pc, ecs->ptid);
3394
3395 /* Note that we're interested in knowing whether the bpstat actually
3396 causes a stop, not just whether it may explain the signal.
3397 Software watchpoints, for example, always appear in the
3398 bpstat. */
3399 ecs->random_signal
3400 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3401
3402 /* If no catchpoint triggered for this, then keep going. */
3403 if (ecs->random_signal)
3404 {
3405 ptid_t parent;
3406 ptid_t child;
3407 int should_resume;
3408 int follow_child
3409 = (follow_fork_mode_string == follow_fork_mode_child);
3410
3411 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3412
3413 should_resume = follow_fork ();
3414
3415 parent = ecs->ptid;
3416 child = ecs->ws.value.related_pid;
3417
3418 /* In non-stop mode, also resume the other branch. */
3419 if (non_stop && !detach_fork)
3420 {
3421 if (follow_child)
3422 switch_to_thread (parent);
3423 else
3424 switch_to_thread (child);
3425
3426 ecs->event_thread = inferior_thread ();
3427 ecs->ptid = inferior_ptid;
3428 keep_going (ecs);
3429 }
3430
3431 if (follow_child)
3432 switch_to_thread (child);
3433 else
3434 switch_to_thread (parent);
3435
3436 ecs->event_thread = inferior_thread ();
3437 ecs->ptid = inferior_ptid;
3438
3439 if (should_resume)
3440 keep_going (ecs);
3441 else
3442 stop_stepping (ecs);
3443 return;
3444 }
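/* A fork/vfork catchpoint triggered; report this stop as a trap and
   let the common stop-test code below decide how to present it.  */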
3445 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3446 goto process_event_stop_test;
3447
3448 case TARGET_WAITKIND_VFORK_DONE:
3449 /* Done with the shared memory region. Re-insert breakpoints in
3450 the parent, and keep going. */
3451
3452 if (debug_infrun)
3453 fprintf_unfiltered (gdb_stdlog,
3454 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3455
3456 if (!ptid_equal (ecs->ptid, inferior_ptid))
3457 context_switch (ecs->ptid);
3458
3459 current_inferior ()->waiting_for_vfork_done = 0;
3460 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3461 /* This also takes care of reinserting breakpoints in the
3462 previously locked inferior. */
3463 keep_going (ecs);
3464 return;
3465
3466 case TARGET_WAITKIND_EXECD:
3467 if (debug_infrun)
3468 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3469
3470 if (!ptid_equal (ecs->ptid, inferior_ptid))
3471 {
3472 context_switch (ecs->ptid);
3473 reinit_frame_cache ();
3474 }
3475
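/* The exec replaced the previous program image, so any software
   single-step breakpoints planted in the old image are gone; just
   drop our bookkeeping for them.  */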
3476 singlestep_breakpoints_inserted_p = 0;
3477 cancel_single_step_breakpoints ();
3478
3479 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3480
3481 /* Do whatever is necessary to the parent branch of the vfork. */
3482 handle_vfork_child_exec_or_exit (1);
3483
3484 /* This causes the eventpoints and symbol table to be reset.
3485 Must do this now, before trying to determine whether to
3486 stop. */
3487 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3488
3489 ecs->event_thread->control.stop_bpstat
3490 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3491 stop_pc, ecs->ptid);
3492 ecs->random_signal
3493 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3494
3495 /* Note that this may be referenced from inside
3496 bpstat_stop_status above, through inferior_has_execd. */
3497 xfree (ecs->ws.value.execd_pathname);
3498 ecs->ws.value.execd_pathname = NULL;
3499
3500 /* If no catchpoint triggered for this, then keep going. */
3501 if (ecs->random_signal)
3502 {
3503 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3504 keep_going (ecs);
3505 return;
3506 }
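/* An exec catchpoint triggered; handle it like a breakpoint trap in
   the common stop-test code below.  */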
3507 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3508 goto process_event_stop_test;
3509
3510 /* Be careful not to try to gather much state about a thread
3511 that's in a syscall. It's frequently a losing proposition. */
3512 case TARGET_WAITKIND_SYSCALL_ENTRY:
3513 if (debug_infrun)
3514 fprintf_unfiltered (gdb_stdlog,
3515 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3516 /* Get the current syscall number. */
3517 if (handle_syscall_event (ecs) != 0)
3518 return;
3519 goto process_event_stop_test;
3520
3521 /* Before examining the threads further, step this thread to
3522 get it entirely out of the syscall. (We get notice of the
3523 event when the thread is just on the verge of exiting a
3524 syscall. Stepping one instruction seems to get it back
3525 into user code.) */
3526 case TARGET_WAITKIND_SYSCALL_RETURN:
3527 if (debug_infrun)
3528 fprintf_unfiltered (gdb_stdlog,
3529 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3530 if (handle_syscall_event (ecs) != 0)
3531 return;
3532 goto process_event_stop_test;
3533
3534 case TARGET_WAITKIND_STOPPED:
3535 if (debug_infrun)
3536 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3537 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3538 break;
3539
3540 case TARGET_WAITKIND_NO_HISTORY:
3541 /* Reverse execution: target ran out of history info. */
3542 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3543 print_no_history_reason ();
3544 stop_stepping (ecs);
3545 return;
3546 }
3547
3548 if (ecs->new_thread_event)
3549 {
3550 if (non_stop)
3551 /* Non-stop assumes that the target handles adding new threads
3552 to the thread list. */
3553 internal_error (__FILE__, __LINE__,
3554 "targets should add new threads to the thread "
3555 "list themselves in non-stop mode.");
3556
3557 /* We may want to consider not doing a resume here in order to
3558 give the user a chance to play with the new thread. It might
3559 be good to make that a user-settable option. */
3560
3561 /* At this point, all threads are stopped (happens automatically
3562 in either the OS or the native code). Therefore we need to
3563 continue all threads in order to make progress. */
3564
3565 if (!ptid_equal (ecs->ptid, inferior_ptid))
3566 context_switch (ecs->ptid);
3567 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3568 prepare_to_wait (ecs);
3569 return;
3570 }
3571
3572 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3573 {
3574 /* Do we need to clean up the state of a thread that has
3575 completed a displaced single-step? (Doing so usually affects
3576 the PC, so do it here, before we set stop_pc.) */
3577 displaced_step_fixup (ecs->ptid,
3578 ecs->event_thread->suspend.stop_signal);
3579
3580 /* If we either finished a single-step or hit a breakpoint, but
3581 the user wanted this thread to be stopped, pretend we got a
3582 SIG0 (generic unsignaled stop). */
3583
3584 if (ecs->event_thread->stop_requested
3585 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3586 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3587 }
3588
3589 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3590
3591 if (debug_infrun)
3592 {
3593 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3594 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3595 struct cleanup *old_chain = save_inferior_ptid ();
3596
3597 inferior_ptid = ecs->ptid;
3598
3599 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3600 paddress (gdbarch, stop_pc));
3601 if (target_stopped_by_watchpoint ())
3602 {
3603 CORE_ADDR addr;
3604
3605 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3606
3607 if (target_stopped_data_address (&current_target, &addr))
3608 fprintf_unfiltered (gdb_stdlog,
3609 "infrun: stopped data address = %s\n",
3610 paddress (gdbarch, addr));
3611 else
3612 fprintf_unfiltered (gdb_stdlog,
3613 "infrun: (no data address available)\n");
3614 }
3615
3616 do_cleanups (old_chain);
3617 }
3618
3619 if (stepping_past_singlestep_breakpoint)
3620 {
3621 gdb_assert (singlestep_breakpoints_inserted_p);
3622 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3623 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3624
3625 stepping_past_singlestep_breakpoint = 0;
3626
3627 /* We've either finished single-stepping past the single-step
3628 breakpoint, or stopped for some other reason. It would be nice if
3629 we could tell, but we can't reliably. */
3630 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3631 {
3632 if (debug_infrun)
3633 fprintf_unfiltered (gdb_stdlog,
3634 "infrun: stepping_past_"
3635 "singlestep_breakpoint\n");
3636 /* Pull the single step breakpoints out of the target. */
3637 remove_single_step_breakpoints ();
3638 singlestep_breakpoints_inserted_p = 0;
3639
3640 ecs->random_signal = 0;
3641 ecs->event_thread->control.trap_expected = 0;
3642
3643 context_switch (saved_singlestep_ptid);
3644 if (deprecated_context_hook)
3645 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3646
3647 resume (1, TARGET_SIGNAL_0);
3648 prepare_to_wait (ecs);
3649 return;
3650 }
3651 }
3652
3653 if (!ptid_equal (deferred_step_ptid, null_ptid))
3654 {
3655 /* In non-stop mode, there's never a deferred_step_ptid set. */
3656 gdb_assert (!non_stop);
3657
3658 /* If we stopped for some other reason than single-stepping, ignore
3659 the fact that we were supposed to switch back. */
3660 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3661 {
3662 if (debug_infrun)
3663 fprintf_unfiltered (gdb_stdlog,
3664 "infrun: handling deferred step\n");
3665
3666 /* Pull the single step breakpoints out of the target. */
3667 if (singlestep_breakpoints_inserted_p)
3668 {
3669 remove_single_step_breakpoints ();
3670 singlestep_breakpoints_inserted_p = 0;
3671 }
3672
3673 ecs->event_thread->control.trap_expected = 0;
3674
3675 /* Note: We do not call context_switch at this point, as the
3676 context is already set up for stepping the original thread. */
3677 switch_to_thread (deferred_step_ptid);
3678 deferred_step_ptid = null_ptid;
3679 /* Suppress spurious "Switching to ..." message. */
3680 previous_inferior_ptid = inferior_ptid;
3681
3682 resume (1, TARGET_SIGNAL_0);
3683 prepare_to_wait (ecs);
3684 return;
3685 }
3686
3687 deferred_step_ptid = null_ptid;
3688 }
3689
3690 /* See if a thread hit a thread-specific breakpoint that was meant for
3691 another thread. If so, then step that thread past the breakpoint,
3692 and continue it. */
3693
3694 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3695 {
3696 int thread_hop_needed = 0;
3697 struct address_space *aspace =
3698 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3699
3700 /* Check if a regular breakpoint has been hit before checking
3701 for a potential single step breakpoint. Otherwise, GDB will
3702 not see this breakpoint hit when stepping onto breakpoints. */
3703 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3704 {
3705 ecs->random_signal = 0;
3706 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3707 thread_hop_needed = 1;
3708 }
3709 else if (singlestep_breakpoints_inserted_p)
3710 {
3711 /* We have not context switched yet, so this should be true
3712 no matter which thread hit the singlestep breakpoint. */
3713 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3714 if (debug_infrun)
3715 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3716 "trap for %s\n",
3717 target_pid_to_str (ecs->ptid));
3718
3719 ecs->random_signal = 0;
3720 /* The call to in_thread_list is necessary because PTIDs sometimes
3721 change when we go from single-threaded to multi-threaded. If
3722 the singlestep_ptid is still in the list, assume that it is
3723 really different from ecs->ptid. */
3724 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3725 && in_thread_list (singlestep_ptid))
3726 {
3727 /* If the PC of the thread we were trying to single-step
3728 has changed, discard this event (which we were going
3729 to ignore anyway), and pretend we saw that thread
3730 trap. This prevents us continuously moving the
3731 single-step breakpoint forward, one instruction at a
3732 time. If the PC has changed, then the thread we were
3733 trying to single-step has trapped or been signalled,
3734 but the event has not been reported to GDB yet.
3735
3736 There might be some cases where this loses signal
3737 information, if a signal has arrived at exactly the
3738 same time that the PC changed, but this is the best
3739 we can do with the information available. Perhaps we
3740 should arrange to report all events for all threads
3741 when they stop, or to re-poll the remote looking for
3742 this particular thread (i.e. temporarily enable
3743 schedlock). */
3744
3745 CORE_ADDR new_singlestep_pc
3746 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3747
3748 if (new_singlestep_pc != singlestep_pc)
3749 {
3750 enum target_signal stop_signal;
3751
3752 if (debug_infrun)
3753 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3754 " but expected thread advanced also\n");
3755
3756 /* The current context still belongs to
3757 singlestep_ptid. Don't swap here, since that's
3758 the context we want to use. Just fudge our
3759 state and continue. */
3760 stop_signal = ecs->event_thread->suspend.stop_signal;
3761 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3762 ecs->ptid = singlestep_ptid;
3763 ecs->event_thread = find_thread_ptid (ecs->ptid);
3764 ecs->event_thread->suspend.stop_signal = stop_signal;
3765 stop_pc = new_singlestep_pc;
3766 }
3767 else
3768 {
3769 if (debug_infrun)
3770 fprintf_unfiltered (gdb_stdlog,
3771 "infrun: unexpected thread\n");
3772
3773 thread_hop_needed = 1;
3774 stepping_past_singlestep_breakpoint = 1;
3775 saved_singlestep_ptid = singlestep_ptid;
3776 }
3777 }
3778 }
3779
3780 if (thread_hop_needed)
3781 {
3782 struct regcache *thread_regcache;
3783 int remove_status = 0;
3784
3785 if (debug_infrun)
3786 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3787
3788 /* Switch context before touching inferior memory; the
3789 previous thread may have exited. */
3790 if (!ptid_equal (inferior_ptid, ecs->ptid))
3791 context_switch (ecs->ptid);
3792
3793 /* Saw a breakpoint, but it was hit by the wrong thread.
3794 Just continue. */
3795
3796 if (singlestep_breakpoints_inserted_p)
3797 {
3798 /* Pull the single step breakpoints out of the target. */
3799 remove_single_step_breakpoints ();
3800 singlestep_breakpoints_inserted_p = 0;
3801 }
3802
3803 /* If the arch supports displaced stepping, don't remove the
3804 breakpoints. */
3805 thread_regcache = get_thread_regcache (ecs->ptid);
3806 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3807 remove_status = remove_breakpoints ();
3808
3809 /* Did we fail to remove breakpoints? If so, we cannot
3810 step past the breakpoint, so report an error. (There's at least
3811 one situation in which we can fail to remove
3812 the bp's: on HP-UX systems that use ttrace, we can't
3813 change the address space of a vforking child
3814 process until the child exits (well, okay, not
3815 then either :-) or execs.) */
3816 if (remove_status != 0)
3817 error (_("Cannot step over breakpoint hit in wrong thread"));
3818 else
3819 { /* Single step */
3820 if (!non_stop)
3821 {
3822 /* Only need to require the next event from this
3823 thread in all-stop mode. */
3824 waiton_ptid = ecs->ptid;
3825 infwait_state = infwait_thread_hop_state;
3826 }
3827
3828 ecs->event_thread->stepping_over_breakpoint = 1;
3829 keep_going (ecs);
3830 return;
3831 }
3832 }
3833 else if (singlestep_breakpoints_inserted_p)
3834 {
3835 sw_single_step_trap_p = 1;
3836 ecs->random_signal = 0;
3837 }
3838 }
3839 else
3840 ecs->random_signal = 1;
3841
3842 /* See if something interesting happened to the non-current thread. If
3843 so, then switch to that thread. */
3844 if (!ptid_equal (ecs->ptid, inferior_ptid))
3845 {
3846 if (debug_infrun)
3847 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3848
3849 context_switch (ecs->ptid);
3850
3851 if (deprecated_context_hook)
3852 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3853 }
3854
3855 /* At this point, get hold of the now-current thread's frame. */
3856 frame = get_current_frame ();
3857 gdbarch = get_frame_arch (frame);
3858
3859 if (singlestep_breakpoints_inserted_p)
3860 {
3861 /* Pull the single step breakpoints out of the target. */
3862 remove_single_step_breakpoints ();
3863 singlestep_breakpoints_inserted_p = 0;
3864 }
3865
3866 if (stepped_after_stopped_by_watchpoint)
3867 stopped_by_watchpoint = 0;
3868 else
3869 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3870
3871 /* If necessary, step over this watchpoint. We'll be back to display
3872 it in a moment. */
3873 if (stopped_by_watchpoint
3874 && (target_have_steppable_watchpoint
3875 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3876 {
3877 /* At this point, we are stopped at an instruction which has
3878 attempted to write to a piece of memory under control of
3879 a watchpoint. The instruction hasn't actually executed
3880 yet. If we were to evaluate the watchpoint expression
3881 now, we would get the old value, and therefore no change
3882 would seem to have occurred.
3883
3884 In order to make watchpoints work `right', we really need
3885 to complete the memory write, and then evaluate the
3886 watchpoint expression. We do this by single-stepping the
3887 target.
3888
3889 It may not be necessary to disable the watchpoint to step over
3890 it. For example, the PA can (with some kernel cooperation)
3891 single step over a watchpoint without disabling the watchpoint.
3892
3893 It is far more common to need to disable a watchpoint to step
3894 the inferior over it. If we have non-steppable watchpoints,
3895 we must disable the current watchpoint; it's simplest to
3896 disable all watchpoints and breakpoints. */
3897 int hw_step = 1;
3898
3899 if (!target_have_steppable_watchpoint)
3900 {
3901 remove_breakpoints ();
3902 /* See comment in resume why we need to stop bypassing signals
3903 while breakpoints have been removed. */
3904 target_pass_signals (0, NULL);
3905 }
3906 /* Single step past the watched instruction, using software single-step breakpoints instead of a hardware step if the architecture requires it. */
3907 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3908 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3909 waiton_ptid = ecs->ptid;
3910 if (target_have_steppable_watchpoint)
3911 infwait_state = infwait_step_watch_state;
3912 else
3913 infwait_state = infwait_nonstep_watch_state;
3914 prepare_to_wait (ecs);
3915 return;
3916 }
3917
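/* Look up which function stop_pc falls in, and reset the per-stop
   state that the decision code below relies on.  */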
3918 ecs->stop_func_start = 0;
3919 ecs->stop_func_end = 0;
3920 ecs->stop_func_name = 0;
3921 /* Don't care about return value; stop_func_start and stop_func_name
3922 will both be 0 if it doesn't work. */
3923 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3924 &ecs->stop_func_start, &ecs->stop_func_end);
3925 ecs->stop_func_start
3926 += gdbarch_deprecated_function_start_offset (gdbarch);
3927 ecs->event_thread->stepping_over_breakpoint = 0;
3928 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
3929 ecs->event_thread->control.stop_step = 0;
3930 stop_print_frame = 1;
3931 ecs->random_signal = 0;
3932 stopped_by_random_signal = 0;
3933
3934 /* Hide inlined functions starting here, unless we just performed stepi or
3935 nexti. After stepi and nexti, always show the innermost frame (not any
3936 inline function call sites). */
3937 if (ecs->event_thread->control.step_range_end != 1)
3938 skip_inline_frames (ecs->ptid);
3939
3940 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3941 && ecs->event_thread->control.trap_expected
3942 && gdbarch_single_step_through_delay_p (gdbarch)
3943 && currently_stepping (ecs->event_thread))
3944 {
3945 /* We're trying to step off a breakpoint. Turns out that we're
3946 also on an instruction that needs to be stepped multiple
3947 times before it has been fully executed. E.g., architectures
3948 with a delay slot. It needs to be stepped twice, once for
3949 the instruction and once for the delay slot. */
3950 int step_through_delay
3951 = gdbarch_single_step_through_delay (gdbarch, frame);
3952
3953 if (debug_infrun && step_through_delay)
3954 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3955 if (ecs->event_thread->control.step_range_end == 0
3956 && step_through_delay)
3957 {
3958 /* The user issued a continue when stopped at a breakpoint.
3959 Set up for another trap and get out of here. */
3960 ecs->event_thread->stepping_over_breakpoint = 1;
3961 keep_going (ecs);
3962 return;
3963 }
3964 else if (step_through_delay)
3965 {
3966 /* The user issued a step when stopped at a breakpoint.
3967 Maybe we should stop, maybe we should not - the delay
3968 slot *might* correspond to a line of source. In any
3969 case, don't decide that here, just set
3970 ecs->stepping_over_breakpoint, making sure we
3971 single-step again before breakpoints are re-inserted. */
3972 ecs->event_thread->stepping_over_breakpoint = 1;
3973 }
3974 }
3975
3976 /* Look at the cause of the stop, and decide what to do.
3977 The alternatives are:
3978 1) stop_stepping and return; to really stop and return to the debugger,
3979 2) keep_going and return to start up again
3980 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3981 3) set ecs->random_signal to 1, and the decision between 1 and 2
3982 will be made according to the signal handling tables. */
3983
3984 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3985 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3986 || stop_soon == STOP_QUIETLY_REMOTE)
3987 {
3988 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3989 && stop_after_trap)
3990 {
3991 if (debug_infrun)
3992 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3993 stop_print_frame = 0;
3994 stop_stepping (ecs);
3995 return;
3996 }
3997
3998 /* This originates from start_remote(), start_inferior() and
3999 shared library hook functions. */
4000 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4001 {
4002 if (debug_infrun)
4003 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4004 stop_stepping (ecs);
4005 return;
4006 }
4007
4008 /* This originates from attach_command(). We need to overwrite
4009 the stop_signal here, because some kernels don't ignore a
4010 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4011 See more comments in inferior.h. On the other hand, if we
4012 get a non-SIGSTOP, report it to the user - assume the backend
4013 will handle the SIGSTOP if it should show up later.
4014
4015 Also consider that the attach is complete when we see a
4016 SIGTRAP. Some systems (e.g. Windows) and stubs supporting
4017 target extended-remote report it instead of a SIGSTOP
4018 (e.g. gdbserver). We already rely on SIGTRAP being our
4019 signal, so this is no exception.
4020
4021 Also consider that the attach is complete when we see a
4022 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4023 the target to stop all threads of the inferior, in case the
4024 low level attach operation doesn't stop them implicitly. If
4025 they weren't stopped implicitly, then the stub will report a
4026 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4027 other than GDB's request. */
4028 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4029 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4030 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4031 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4032 {
4033 stop_stepping (ecs);
4034 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4035 return;
4036 }
4037
4038 /* See if there is a breakpoint at the current PC. */
4039 ecs->event_thread->control.stop_bpstat
4040 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4041 stop_pc, ecs->ptid);
4042
4043 /* The following is needed in case a breakpoint condition
4044 called a function. */
4045 stop_print_frame = 1;
4046
4047 /* This is where we handle "moribund" watchpoints. Unlike
4048 software breakpoint traps, hardware watchpoint traps are
4049 always distinguishable from random traps. If no high-level
4050 watchpoint is associated with the reported stop data address
4051 anymore, then the bpstat does not explain the signal ---
4052 simply make sure to ignore it if `stopped_by_watchpoint' is
4053 set. */
4054
4055 if (debug_infrun
4056 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4057 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4058 && stopped_by_watchpoint)
4059 fprintf_unfiltered (gdb_stdlog,
4060 "infrun: no user watchpoint explains "
4061 "watchpoint SIGTRAP, ignoring\n");
4062
4063 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4064 at one stage in the past included checks for an inferior
4065 function call's call dummy's return breakpoint. The original
4066 comment, that went with the test, read:
4067
4068 ``End of a stack dummy. Some systems (e.g. Sony news) give
4069 another signal besides SIGTRAP, so check here as well as
4070 above.''
4071
4072 If someone ever tries to get call dummies on a
4073 non-executable stack to work (where the target would stop
4074 with something like a SIGSEGV), then those tests might need
4075 to be re-instated. Given, however, that the tests were only
4076 enabled when momentary breakpoints were not being used, I
4077 suspect that it won't be the case.
4078
4079 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4080 be necessary for call dummies on a non-executable stack on
4081 SPARC. */
4082
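/* A SIGTRAP is only "random" if no breakpoint or watchpoint explains
   it, we weren't expecting a trap from stepping over a breakpoint,
   and we weren't single-stepping through a range without a
   step-resume breakpoint pending.  */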
4083 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4084 ecs->random_signal
4085 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4086 || stopped_by_watchpoint
4087 || ecs->event_thread->control.trap_expected
4088 || (ecs->event_thread->control.step_range_end
4089 && (ecs->event_thread->control.step_resume_breakpoint
4090 == NULL)));
4091 else
4092 {
4093 ecs->random_signal = !bpstat_explains_signal
4094 (ecs->event_thread->control.stop_bpstat);
4095 if (!ecs->random_signal)
4096 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4097 }
4098 }
4099
4100 /* When we reach this point, we've pretty much decided
4101 that the reason for stopping must've been a random
4102 (unexpected) signal. */
4103
4104 else
4105 ecs->random_signal = 1;
4106
4107 process_event_stop_test:
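/* We also come here via "goto" from the fork/vfork, exec and syscall
   cases above, so that those events share the stop/step decision
   logic below.  */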
4108
4109 /* Re-fetch current thread's frame in case we did a
4110 "goto process_event_stop_test" above. */
4111 frame = get_current_frame ();
4112 gdbarch = get_frame_arch (frame);
4113
4114 /* For the program's own signals, act according to
4115 the signal handling tables. */
4116
4117 if (ecs->random_signal)
4118 {
4119 /* Signal not for debugging purposes. */
4120 int printed = 0;
4121 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4122
4123 if (debug_infrun)
4124 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4125 ecs->event_thread->suspend.stop_signal);
4126
4127 stopped_by_random_signal = 1;
4128
4129 if (signal_print[ecs->event_thread->suspend.stop_signal])
4130 {
4131 printed = 1;
4132 target_terminal_ours_for_output ();
4133 print_signal_received_reason
4134 (ecs->event_thread->suspend.stop_signal);
4135 }
4136 /* Always stop on signals if we're either just gaining control
4137 of the program, or the user explicitly requested this thread
4138 to remain stopped. */
4139 if (stop_soon != NO_STOP_QUIETLY
4140 || ecs->event_thread->stop_requested
4141 || (!inf->detaching
4142 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4143 {
4144 stop_stepping (ecs);
4145 return;
4146 }
4147 /* If we're not going to stop, give the terminal back
4148 if we took it away. */
4149 else if (printed)
4150 target_terminal_inferior ();
4151
4152 /* Clear the signal if it should not be passed. */
4153 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4154 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4155
4156 if (ecs->event_thread->prev_pc == stop_pc
4157 && ecs->event_thread->control.trap_expected
4158 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4159 {
4160 /* We were just starting a new sequence, attempting to
4161 single-step off of a breakpoint and expecting a SIGTRAP.
4162 Instead this signal arrives. This signal will take us out
4163 of the stepping range, so GDB needs to remember to resume
4164 stepping off that breakpoint once the signal handler
4165 returns. */
4166 /* To simplify things, "continue" is forced to use the same
4167 code paths as single-step - set a breakpoint at the
4168 signal return address and then, once hit, step off that
4169 breakpoint. */
4170 if (debug_infrun)
4171 fprintf_unfiltered (gdb_stdlog,
4172 "infrun: signal arrived while stepping over "
4173 "breakpoint\n");
4174
4175 insert_hp_step_resume_breakpoint_at_frame (frame);
4176 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4177 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4178 ecs->event_thread->control.trap_expected = 0;
4179 keep_going (ecs);
4180 return;
4181 }
4182
4183 if (ecs->event_thread->control.step_range_end != 0
4184 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4185 && (ecs->event_thread->control.step_range_start <= stop_pc
4186 && stop_pc < ecs->event_thread->control.step_range_end)
4187 && frame_id_eq (get_stack_frame_id (frame),
4188 ecs->event_thread->control.step_stack_frame_id)
4189 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4190 {
4191 /* The inferior is about to take a signal that will take it
4192 out of the single step range. Set a breakpoint at the
4193 current PC (which is presumably where the signal handler
4194 will eventually return) and then allow the inferior to
4195 run free.
4196
4197 Note that this is only needed for a signal delivered
4198 while in the single-step range. Nested signals aren't a
4199 problem as they eventually all return. */
4200 if (debug_infrun)
4201 fprintf_unfiltered (gdb_stdlog,
4202 "infrun: signal may take us out of "
4203 "single-step range\n");
4204
4205 insert_hp_step_resume_breakpoint_at_frame (frame);
4206 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4207 ecs->event_thread->control.trap_expected = 0;
4208 keep_going (ecs);
4209 return;
4210 }
4211
4212 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4213 when either there's a nested signal, or when there's a
4214 pending signal enabled just as the signal handler returns
4215 (leaving the inferior at the step-resume-breakpoint without
4216 actually executing it). Either way continue until the
4217 breakpoint is really hit. */
4218 keep_going (ecs);
4219 return;
4220 }
4221
4222 /* Handle cases caused by hitting a breakpoint. */
4223 {
4224 CORE_ADDR jmp_buf_pc;
4225 struct bpstat_what what;
4226
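/* Ask the breakpoint module what it wants done about the
   breakpoint(s) we stopped at (stop silently, stop noisily, step
   over and keep going, handle a longjmp, etc.).  */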
4227 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4228
4229 if (what.call_dummy)
4230 {
4231 stop_stack_dummy = what.call_dummy;
4232 }
4233
4234 /* If we hit an internal event that triggers symbol changes, the
4235 current frame will be invalidated within bpstat_what (e.g., if
4236 we hit an internal solib event). Re-fetch it. */
4237 frame = get_current_frame ();
4238 gdbarch = get_frame_arch (frame);
4239
4240 switch (what.main_action)
4241 {
4242 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4243 /* If we hit the breakpoint at longjmp while stepping, we
4244 install a momentary breakpoint at the target of the
4245 jmp_buf. */
4246
4247 if (debug_infrun)
4248 fprintf_unfiltered (gdb_stdlog,
4249 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4250
4251 ecs->event_thread->stepping_over_breakpoint = 1;
4252
4253 if (what.is_longjmp)
4254 {
4255 if (!gdbarch_get_longjmp_target_p (gdbarch)
4256 || !gdbarch_get_longjmp_target (gdbarch,
4257 frame, &jmp_buf_pc))
4258 {
4259 if (debug_infrun)
4260 fprintf_unfiltered (gdb_stdlog,
4261 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4262 "(!gdbarch_get_longjmp_target)\n");
4263 keep_going (ecs);
4264 return;
4265 }
4266
4267 /* We're going to replace the current step-resume breakpoint
4268 with a longjmp-resume breakpoint. */
4269 delete_step_resume_breakpoint (ecs->event_thread);
4270
4271 /* Insert a breakpoint at resume address. */
4272 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4273 }
4274 else
4275 {
4276 struct symbol *func = get_frame_function (frame);
4277
4278 if (func)
4279 check_exception_resume (ecs, frame, func);
4280 }
4281 keep_going (ecs);
4282 return;
4283
4284 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4285 if (debug_infrun)
4286 fprintf_unfiltered (gdb_stdlog,
4287 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4288
4289 if (what.is_longjmp)
4290 {
4291 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4292 != NULL);
4293 delete_step_resume_breakpoint (ecs->event_thread);
4294 }
4295 else
4296 {
4297 /* There are several cases to consider.
4298
4299 1. The initiating frame no longer exists. In this case
4300 we must stop, because the exception has gone too far.
4301
4302 2. The initiating frame exists, and is the same as the
4303 current frame. We stop, because the exception has been
4304 caught.
4305
4306 3. The initiating frame exists and is different from
4307 the current frame. This means the exception has been
4308 caught beneath the initiating frame, so keep going. */
4309 struct frame_info *init_frame
4310 = frame_find_by_id (ecs->event_thread->initiating_frame);
4311
4312 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4313 != NULL);
4314 delete_exception_resume_breakpoint (ecs->event_thread);
4315
4316 if (init_frame)
4317 {
4318 struct frame_id current_id
4319 = get_frame_id (get_current_frame ());
4320 if (frame_id_eq (current_id,
4321 ecs->event_thread->initiating_frame))
4322 {
4323 /* Case 2. Fall through. */
4324 }
4325 else
4326 {
4327 /* Case 3. */
4328 keep_going (ecs);
4329 return;
4330 }
4331 }
4332
4333 /* For Cases 1 and 2, remove the step-resume breakpoint,
4334 if it exists. */
4335 delete_step_resume_breakpoint (ecs->event_thread);
4336 }
4337
4338 ecs->event_thread->control.stop_step = 1;
4339 print_end_stepping_range_reason ();
4340 stop_stepping (ecs);
4341 return;
4342
4343 case BPSTAT_WHAT_SINGLE:
4344 if (debug_infrun)
4345 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4346 ecs->event_thread->stepping_over_breakpoint = 1;
4347 /* Still need to check other stuff, at least the case
4348 where we are stepping and step out of the right range. */
4349 break;
4350
4351 case BPSTAT_WHAT_STEP_RESUME:
4352 if (debug_infrun)
4353 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4354
4355 delete_step_resume_breakpoint (ecs->event_thread);
4356 if (ecs->event_thread->control.proceed_to_finish
4357 && execution_direction == EXEC_REVERSE)
4358 {
4359 struct thread_info *tp = ecs->event_thread;
4360
4361 /* We are finishing a function in reverse, and just hit
4362 the step-resume breakpoint at the start address of the
4363 function, and we're almost there -- just need to back
4364 up by one more single-step, which should take us back
4365 to the function call. */
4366 tp->control.step_range_start = tp->control.step_range_end = 1;
4367 keep_going (ecs);
4368 return;
4369 }
4370 if (stop_pc == ecs->stop_func_start
4371 && execution_direction == EXEC_REVERSE)
4372 {
4373 /* We are stepping over a function call in reverse, and
4374 just hit the step-resume breakpoint at the start
4375 address of the function. Go back to single-stepping,
4376 which should take us back to the function call. */
4377 ecs->event_thread->stepping_over_breakpoint = 1;
4378 keep_going (ecs);
4379 return;
4380 }
4381 break;
4382
4383 case BPSTAT_WHAT_STOP_NOISY:
4384 if (debug_infrun)
4385 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4386 stop_print_frame = 1;
4387
4388 /* We are about to nuke the step_resume_breakpoint via the
4389 cleanup chain, so no need to worry about it here. */
4390
4391 stop_stepping (ecs);
4392 return;
4393
4394 case BPSTAT_WHAT_STOP_SILENT:
4395 if (debug_infrun)
4396 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4397 stop_print_frame = 0;
4398
4399 /* We are about to nuke the step_resume_breakpoint via the
4400 cleanup chain, so no need to worry about it here. */
4401
4402 stop_stepping (ecs);
4403 return;
4404
4405 case BPSTAT_WHAT_HP_STEP_RESUME:
4406 if (debug_infrun)
4407 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4408
4409 delete_step_resume_breakpoint (ecs->event_thread);
4410 if (ecs->event_thread->step_after_step_resume_breakpoint)
4411 {
4412 /* Back when the step-resume breakpoint was inserted, we
4413 were trying to single-step off a breakpoint. Go back
4414 to doing that. */
4415 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4416 ecs->event_thread->stepping_over_breakpoint = 1;
4417 keep_going (ecs);
4418 return;
4419 }
4420 break;
4421
4422 case BPSTAT_WHAT_KEEP_CHECKING:
4423 break;
4424 }
4425 }
4426
4427 /* We come here if we hit a breakpoint but should not
4428 stop for it. Possibly we also were stepping
4429 and should stop for that. So fall through and
4430 test for stepping. But, if not stepping,
4431 do not stop. */
4432
4433 /* In all-stop mode, if we're currently stepping but have stopped in
4434 some other thread, we need to switch back to the stepped thread. */
4435 if (!non_stop)
4436 {
4437 struct thread_info *tp;
4438
4439 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4440 ecs->event_thread);
4441 if (tp)
4442 {
4443 /* However, if the current thread is blocked on some internal
4444 breakpoint, and we simply need to step over that breakpoint
4445 to get it going again, do that first. */
4446 if ((ecs->event_thread->control.trap_expected
4447 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4448 || ecs->event_thread->stepping_over_breakpoint)
4449 {
4450 keep_going (ecs);
4451 return;
4452 }
4453
4454 /* If the stepping thread exited, then don't try to switch
4455 back and resume it, which could fail in several different
4456 ways depending on the target. Instead, just keep going.
4457
4458 We can find a stepping dead thread in the thread list in
4459 two cases:
4460
4461 - The target supports thread exit events, and when the
4462 target tries to delete the thread from the thread list,
4463 inferior_ptid pointed at the exiting thread. In such a
4464 case, calling delete_thread does not really remove the
4465 thread from the list; instead, the thread is left listed,
4466 with 'exited' state.
4467
4468 - The target's debug interface does not support thread
4469 exit events, and so we have no idea whatsoever if the
4470 previously stepping thread is still alive. For that
4471 reason, we need to synchronously query the target
4472 now. */
4473 if (is_exited (tp->ptid)
4474 || !target_thread_alive (tp->ptid))
4475 {
4476 if (debug_infrun)
4477 fprintf_unfiltered (gdb_stdlog,
4478 "infrun: not switching back to "
4479 "stepped thread, it has vanished\n");
4480
4481 delete_thread (tp->ptid);
4482 keep_going (ecs);
4483 return;
4484 }
4485
4486 /* Otherwise, we no longer expect a trap in the current thread.
4487 Clear the trap_expected flag before switching back -- this is
4488 what keep_going would do as well, if we called it. */
4489 ecs->event_thread->control.trap_expected = 0;
4490
4491 if (debug_infrun)
4492 fprintf_unfiltered (gdb_stdlog,
4493 "infrun: switching back to stepped thread\n");
4494
4495 ecs->event_thread = tp;
4496 ecs->ptid = tp->ptid;
4497 context_switch (ecs->ptid);
4498 keep_going (ecs);
4499 return;
4500 }
4501 }
4502
4503 /* Are we stepping to get the inferior out of the dynamic linker's
4504 hook (and possibly the dld itself) after catching a shlib
4505 event? */
4506 if (ecs->event_thread->stepping_through_solib_after_catch)
4507 {
4508 #if defined(SOLIB_ADD)
4509 /* Have we reached our destination? If not, keep going. */
4510 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4511 {
4512 if (debug_infrun)
4513 fprintf_unfiltered (gdb_stdlog,
4514 "infrun: stepping in dynamic linker\n");
4515 ecs->event_thread->stepping_over_breakpoint = 1;
4516 keep_going (ecs);
4517 return;
4518 }
4519 #endif
4520 if (debug_infrun)
4521 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4522 /* Else, stop and report the catchpoint(s) whose triggering
4523 caused us to begin stepping. */
4524 ecs->event_thread->stepping_through_solib_after_catch = 0;
4525 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4526 ecs->event_thread->control.stop_bpstat
4527 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4528 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4529 stop_print_frame = 1;
4530 stop_stepping (ecs);
4531 return;
4532 }
4533
4534 if (ecs->event_thread->control.step_resume_breakpoint)
4535 {
4536 if (debug_infrun)
4537 fprintf_unfiltered (gdb_stdlog,
4538 "infrun: step-resume breakpoint is inserted\n");
4539
4540 /* Having a step-resume breakpoint overrides anything
4541 else having to do with stepping commands until
4542 that breakpoint is reached. */
4543 keep_going (ecs);
4544 return;
4545 }
4546
4547 if (ecs->event_thread->control.step_range_end == 0)
4548 {
4549 if (debug_infrun)
4550 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4551 /* Likewise if we aren't even stepping. */
4552 keep_going (ecs);
4553 return;
4554 }
4555
4556 /* Re-fetch current thread's frame in case the code above caused
4557 the frame cache to be re-initialized, making our FRAME variable
4558 a dangling pointer. */
4559 frame = get_current_frame ();
4560 gdbarch = get_frame_arch (frame);
4561
4562 /* If stepping through a line, keep going if still within it.
4563
4564 Note that step_range_end is the address of the first instruction
4565 beyond the step range, and NOT the address of the last instruction
4566 within it!
4567
4568 Note also that during reverse execution, we may be stepping
4569 through a function epilogue and therefore must detect when
4570 the current frame changes in the middle of a line. */
4571
4572 if (stop_pc >= ecs->event_thread->control.step_range_start
4573 && stop_pc < ecs->event_thread->control.step_range_end
4574 && (execution_direction != EXEC_REVERSE
4575 || frame_id_eq (get_frame_id (frame),
4576 ecs->event_thread->control.step_frame_id)))
4577 {
4578 if (debug_infrun)
4579 fprintf_unfiltered
4580 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4581 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4582 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4583
4584 /* When stepping backward, stop at beginning of line range
4585 (unless it's the function entry point, in which case
4586 keep going back to the call point). */
4587 if (stop_pc == ecs->event_thread->control.step_range_start
4588 && stop_pc != ecs->stop_func_start
4589 && execution_direction == EXEC_REVERSE)
4590 {
4591 ecs->event_thread->control.stop_step = 1;
4592 print_end_stepping_range_reason ();
4593 stop_stepping (ecs);
4594 }
4595 else
4596 keep_going (ecs);
4597
4598 return;
4599 }
4600
4601 /* We stepped out of the stepping range. */
4602
4603 /* If we are stepping at the source level and entered the runtime
4604 loader dynamic symbol resolution code...
4605
4606 EXEC_FORWARD: we keep on single stepping until we exit the run
4607 time loader code and reach the callee's address.
4608
4609 EXEC_REVERSE: we've already executed the callee (backward), and
4610 the runtime loader code is handled just like any other
4611 undebuggable function call. Now we need only keep stepping
4612 backward through the trampoline code, and that's handled further
4613 down, so there is nothing for us to do here. */
4614
4615 if (execution_direction != EXEC_REVERSE
4616 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4617 && in_solib_dynsym_resolve_code (stop_pc))
4618 {
4619 CORE_ADDR pc_after_resolver =
4620 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4621
4622 if (debug_infrun)
4623 fprintf_unfiltered (gdb_stdlog,
4624 "infrun: stepped into dynsym resolve code\n");
4625
4626 if (pc_after_resolver)
4627 {
4628 /* Set up a step-resume breakpoint at the address
4629 indicated by SKIP_SOLIB_RESOLVER. */
4630 struct symtab_and_line sr_sal;
4631
4632 init_sal (&sr_sal);
4633 sr_sal.pc = pc_after_resolver;
4634 sr_sal.pspace = get_frame_program_space (frame);
4635
4636 insert_step_resume_breakpoint_at_sal (gdbarch,
4637 sr_sal, null_frame_id);
4638 }
4639
4640 keep_going (ecs);
4641 return;
4642 }
4643
4644 if (ecs->event_thread->control.step_range_end != 1
4645 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4646 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4647 && get_frame_type (frame) == SIGTRAMP_FRAME)
4648 {
4649 if (debug_infrun)
4650 fprintf_unfiltered (gdb_stdlog,
4651 "infrun: stepped into signal trampoline\n");
4652 /* The inferior, while doing a "step" or "next", has ended up in
4653 a signal trampoline (either by a signal being delivered or by
4654 the signal handler returning). Just single-step until the
4655 inferior leaves the trampoline (either by calling the handler
4656 or returning). */
4657 keep_going (ecs);
4658 return;
4659 }
4660
4661 /* Check for subroutine calls. The check for the current frame
4662 equalling the step ID is not necessary - the check of the
4663 previous frame's ID is sufficient - but it is a common case and
4664 cheaper than checking the previous frame's ID.
4665
4666 NOTE: frame_id_eq will never report two invalid frame IDs as
4667 being equal, so to get into this block, both the current and
4668 previous frame must have valid frame IDs. */
4669 /* The outer_frame_id check is a heuristic to detect stepping
4670 through startup code. If we step over an instruction which
4671 sets the stack pointer from an invalid value to a valid value,
4672 we may detect that as a subroutine call from the mythical
4673 "outermost" function. This could be fixed by marking
4674 outermost frames as !stack_p,code_p,special_p. Then the
4675 initial outermost frame, before sp was valid, would
4676 have code_addr == &_start. See the comment in frame_id_eq
4677 for more. */
4678 if (!frame_id_eq (get_stack_frame_id (frame),
4679 ecs->event_thread->control.step_stack_frame_id)
4680 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4681 ecs->event_thread->control.step_stack_frame_id)
4682 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4683 outer_frame_id)
4684 || step_start_function != find_pc_function (stop_pc))))
4685 {
4686 CORE_ADDR real_stop_pc;
4687
4688 if (debug_infrun)
4689 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4690
4691 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4692 || ((ecs->event_thread->control.step_range_end == 1)
4693 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4694 ecs->stop_func_start)))
4695 {
4696 /* I presume that step_over_calls is only 0 when we're
4697 supposed to be stepping at the assembly language level
4698 ("stepi"). Just stop. */
4699 /* Also, maybe we just did a "nexti" inside a prologue, so we
4700 thought it was a subroutine call but it was not. Stop as
4701 well. FENN */
4702 /* And this works the same backward as frontward. MVS */
4703 ecs->event_thread->control.stop_step = 1;
4704 print_end_stepping_range_reason ();
4705 stop_stepping (ecs);
4706 return;
4707 }
4708
4709 /* Reverse stepping through solib trampolines. */
4710
4711 if (execution_direction == EXEC_REVERSE
4712 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4713 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4714 || (ecs->stop_func_start == 0
4715 && in_solib_dynsym_resolve_code (stop_pc))))
4716 {
4717 /* Any solib trampoline code can be handled in reverse
4718 by simply continuing to single-step. We have already
4719 executed the solib function (backwards), and a few
4720 steps will take us back through the trampoline to the
4721 caller. */
4722 keep_going (ecs);
4723 return;
4724 }
4725
4726 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4727 {
4728 /* We're doing a "next".
4729
4730 Normal (forward) execution: set a breakpoint at the
4731 callee's return address (the address at which the caller
4732 will resume).
4733
4734 Reverse (backward) execution: set the step-resume
4735 breakpoint at the start of the function that we just
4736 stepped into (backwards), and continue to there. When we
4737 get there, we'll need to single-step back to the caller. */
4738
4739 if (execution_direction == EXEC_REVERSE)
4740 {
4741 struct symtab_and_line sr_sal;
4742
4743 /* Normal function call return (static or dynamic). */
4744 init_sal (&sr_sal);
4745 sr_sal.pc = ecs->stop_func_start;
4746 sr_sal.pspace = get_frame_program_space (frame);
4747 insert_step_resume_breakpoint_at_sal (gdbarch,
4748 sr_sal, null_frame_id);
4749 }
4750 else
4751 insert_step_resume_breakpoint_at_caller (frame);
4752
4753 keep_going (ecs);
4754 return;
4755 }
4756
4757 /* If we are in a function call trampoline (a stub between the
4758 calling routine and the real function), locate the real
4759 function. That's what tells us (a) whether we want to step
4760 into it at all, and (b) what prologue we want to run to the
4761 end of, if we do step into it. */
4762 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4763 if (real_stop_pc == 0)
4764 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4765 if (real_stop_pc != 0)
4766 ecs->stop_func_start = real_stop_pc;
4767
4768 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4769 {
4770 struct symtab_and_line sr_sal;
4771
4772 init_sal (&sr_sal);
4773 sr_sal.pc = ecs->stop_func_start;
4774 sr_sal.pspace = get_frame_program_space (frame);
4775
4776 insert_step_resume_breakpoint_at_sal (gdbarch,
4777 sr_sal, null_frame_id);
4778 keep_going (ecs);
4779 return;
4780 }
4781
4782 /* If we have line number information for the function we are
4783 thinking of stepping into, step into it.
4784
4785 If there are several symtabs at that PC (e.g. with include
4786 files), we just want to know whether *any* of them have line
4787 numbers. find_pc_line handles this. */
4788 {
4789 struct symtab_and_line tmp_sal;
4790
4791 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4792 if (tmp_sal.line != 0)
4793 {
4794 if (execution_direction == EXEC_REVERSE)
4795 handle_step_into_function_backward (gdbarch, ecs);
4796 else
4797 handle_step_into_function (gdbarch, ecs);
4798 return;
4799 }
4800 }
4801
4802 /* If we have no line number and the step-stop-if-no-debug is
4803 set, we stop the step so that the user has a chance to switch
4804 to assembly mode. */
4805 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4806 && step_stop_if_no_debug)
4807 {
4808 ecs->event_thread->control.stop_step = 1;
4809 print_end_stepping_range_reason ();
4810 stop_stepping (ecs);
4811 return;
4812 }
4813
4814 if (execution_direction == EXEC_REVERSE)
4815 {
4816 /* Set a breakpoint at callee's start address.
4817 From there we can step once and be back in the caller. */
4818 struct symtab_and_line sr_sal;
4819
4820 init_sal (&sr_sal);
4821 sr_sal.pc = ecs->stop_func_start;
4822 sr_sal.pspace = get_frame_program_space (frame);
4823 insert_step_resume_breakpoint_at_sal (gdbarch,
4824 sr_sal, null_frame_id);
4825 }
4826 else
4827 /* Set a breakpoint at callee's return address (the address
4828 at which the caller will resume). */
4829 insert_step_resume_breakpoint_at_caller (frame);
4830
4831 keep_going (ecs);
4832 return;
4833 }
4834
4835 /* Reverse stepping through solib trampolines. */
4836
4837 if (execution_direction == EXEC_REVERSE
4838 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4839 {
4840 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4841 || (ecs->stop_func_start == 0
4842 && in_solib_dynsym_resolve_code (stop_pc)))
4843 {
4844 /* Any solib trampoline code can be handled in reverse
4845 by simply continuing to single-step. We have already
4846 executed the solib function (backwards), and a few
4847 steps will take us back through the trampoline to the
4848 caller. */
4849 keep_going (ecs);
4850 return;
4851 }
4852 else if (in_solib_dynsym_resolve_code (stop_pc))
4853 {
4854 /* Stepped backward into the solib dynsym resolver.
4855 Set a breakpoint at its start and continue, then
4856 one more step will take us out. */
4857 struct symtab_and_line sr_sal;
4858
4859 init_sal (&sr_sal);
4860 sr_sal.pc = ecs->stop_func_start;
4861 sr_sal.pspace = get_frame_program_space (frame);
4862 insert_step_resume_breakpoint_at_sal (gdbarch,
4863 sr_sal, null_frame_id);
4864 keep_going (ecs);
4865 return;
4866 }
4867 }
4868
4869 /* If we're in the return path from a shared library trampoline,
4870 we want to proceed through the trampoline when stepping. */
4871 if (gdbarch_in_solib_return_trampoline (gdbarch,
4872 stop_pc, ecs->stop_func_name))
4873 {
4874 /* Determine where this trampoline returns. */
4875 CORE_ADDR real_stop_pc;
4876
4877 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4878
4879 if (debug_infrun)
4880 fprintf_unfiltered (gdb_stdlog,
4881 "infrun: stepped into solib return tramp\n");
4882
4883 /* Only proceed through if we know where it's going. */
4884 if (real_stop_pc)
4885 {
4886 /* And put the step-resume breakpoint there and run until we reach it. */
4887 struct symtab_and_line sr_sal;
4888
4889 init_sal (&sr_sal); /* initialize to zeroes */
4890 sr_sal.pc = real_stop_pc;
4891 sr_sal.section = find_pc_overlay (sr_sal.pc);
4892 sr_sal.pspace = get_frame_program_space (frame);
4893
4894 /* Do not specify what the fp should be when we stop since
4895 on some machines the prologue is where the new fp value
4896 is established. */
4897 insert_step_resume_breakpoint_at_sal (gdbarch,
4898 sr_sal, null_frame_id);
4899
4900 /* Restart without fiddling with the step ranges or
4901 other state. */
4902 keep_going (ecs);
4903 return;
4904 }
4905 }
4906
4907 stop_pc_sal = find_pc_line (stop_pc, 0);
4908
4909 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4910 the trampoline processing logic; however, there are some trampolines
4911 that have no names, so we should do trampoline handling first. */
4912 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4913 && ecs->stop_func_name == NULL
4914 && stop_pc_sal.line == 0)
4915 {
4916 if (debug_infrun)
4917 fprintf_unfiltered (gdb_stdlog,
4918 "infrun: stepped into undebuggable function\n");
4919
4920 /* The inferior just stepped into, or returned to, an
4921 undebuggable function (where there is no debugging information
4922 and no line number corresponding to the address where the
4923 inferior stopped). Since we want to skip this kind of code,
4924 we keep going until the inferior returns from this
4925 function - unless the user has asked us not to (via
4926 set step-mode) or we no longer know how to get back
4927 to the call site. */
4928 if (step_stop_if_no_debug
4929 || !frame_id_p (frame_unwind_caller_id (frame)))
4930 {
4931 /* If we have no line number and the step-stop-if-no-debug
4932 is set, we stop the step so that the user has a chance to
4933 switch to assembly mode. */
4934 ecs->event_thread->control.stop_step = 1;
4935 print_end_stepping_range_reason ();
4936 stop_stepping (ecs);
4937 return;
4938 }
4939 else
4940 {
4941 /* Set a breakpoint at callee's return address (the address
4942 at which the caller will resume). */
4943 insert_step_resume_breakpoint_at_caller (frame);
4944 keep_going (ecs);
4945 return;
4946 }
4947 }
4948
4949 if (ecs->event_thread->control.step_range_end == 1)
4950 {
4951 /* It is stepi or nexti. We always want to stop stepping after
4952 one instruction. */
4953 if (debug_infrun)
4954 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4955 ecs->event_thread->control.stop_step = 1;
4956 print_end_stepping_range_reason ();
4957 stop_stepping (ecs);
4958 return;
4959 }
4960
4961 if (stop_pc_sal.line == 0)
4962 {
4963 /* We have no line number information. That means we should stop
4964 stepping (does this always happen right after one instruction,
4965 when we do "s" in a function with no line numbers,
4966 or can this happen as a result of a return or longjmp?). */
4967 if (debug_infrun)
4968 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4969 ecs->event_thread->control.stop_step = 1;
4970 print_end_stepping_range_reason ();
4971 stop_stepping (ecs);
4972 return;
4973 }
4974
4975 /* Look for "calls" to inlined functions, part one. If the inline
4976 frame machinery detected some skipped call sites, we have entered
4977 a new inline function. */
4978
4979 if (frame_id_eq (get_frame_id (get_current_frame ()),
4980 ecs->event_thread->control.step_frame_id)
4981 && inline_skipped_frames (ecs->ptid))
4982 {
4983 struct symtab_and_line call_sal;
4984
4985 if (debug_infrun)
4986 fprintf_unfiltered (gdb_stdlog,
4987 "infrun: stepped into inlined function\n");
4988
4989 find_frame_sal (get_current_frame (), &call_sal);
4990
4991 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
4992 {
4993 /* For "step", we're going to stop. But if the call site
4994 for this inlined function is on the same source line as
4995 we were previously stepping, go down into the function
4996 first. Otherwise stop at the call site. */
4997
4998 if (call_sal.line == ecs->event_thread->current_line
4999 && call_sal.symtab == ecs->event_thread->current_symtab)
5000 step_into_inline_frame (ecs->ptid);
5001
5002 ecs->event_thread->control.stop_step = 1;
5003 print_end_stepping_range_reason ();
5004 stop_stepping (ecs);
5005 return;
5006 }
5007 else
5008 {
5009 /* For "next", we should stop at the call site if it is on a
5010 different source line. Otherwise continue through the
5011 inlined function. */
5012 if (call_sal.line == ecs->event_thread->current_line
5013 && call_sal.symtab == ecs->event_thread->current_symtab)
5014 keep_going (ecs);
5015 else
5016 {
5017 ecs->event_thread->control.stop_step = 1;
5018 print_end_stepping_range_reason ();
5019 stop_stepping (ecs);
5020 }
5021 return;
5022 }
5023 }
5024
5025 /* Look for "calls" to inlined functions, part two. If we are still
5026 in the same real function we were stepping through, but we have
5027 to go further up to find the exact frame ID, we are stepping
5028 through a more inlined call beyond its call site. */
5029
5030 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5031 && !frame_id_eq (get_frame_id (get_current_frame ()),
5032 ecs->event_thread->control.step_frame_id)
5033 && stepped_in_from (get_current_frame (),
5034 ecs->event_thread->control.step_frame_id))
5035 {
5036 if (debug_infrun)
5037 fprintf_unfiltered (gdb_stdlog,
5038 "infrun: stepping through inlined function\n");
5039
5040 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5041 keep_going (ecs);
5042 else
5043 {
5044 ecs->event_thread->control.stop_step = 1;
5045 print_end_stepping_range_reason ();
5046 stop_stepping (ecs);
5047 }
5048 return;
5049 }
5050
5051 if ((stop_pc == stop_pc_sal.pc)
5052 && (ecs->event_thread->current_line != stop_pc_sal.line
5053 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5054 {
5055 /* We are at the start of a different line. So stop. Note that
5056 we don't stop if we step into the middle of a different line.
5057 That is said to make things like for (;;) statements work
5058 better. */
5059 if (debug_infrun)
5060 fprintf_unfiltered (gdb_stdlog,
5061 "infrun: stepped to a different line\n");
5062 ecs->event_thread->control.stop_step = 1;
5063 print_end_stepping_range_reason ();
5064 stop_stepping (ecs);
5065 return;
5066 }
5067
5068 /* We aren't done stepping.
5069
5070 Optimize by setting the stepping range to the line.
5071 (We might not be in the original line, but if we entered a
5072 new line in mid-statement, we continue stepping. This makes
5073 things like for(;;) statements work better.) */
5074
5075 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5076 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5077 set_step_info (frame, stop_pc_sal);
5078
5079 if (debug_infrun)
5080 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5081 keep_going (ecs);
5082 }
5083
5084 /* Is thread TP in the middle of single-stepping? */
5085
5086 static int
5087 currently_stepping (struct thread_info *tp)
5088 {
5089 return ((tp->control.step_range_end
5090 && tp->control.step_resume_breakpoint == NULL)
5091 || tp->control.trap_expected
5092 || tp->stepping_through_solib_after_catch
5093 || bpstat_should_step ());
5094 }
5095
5096 /* Returns true if any thread *but* the one passed in "data" is in the
5097 middle of stepping or of handling a "next". */
5098
5099 static int
5100 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5101 {
5102 if (tp == data)
5103 return 0;
5104
5105 return (tp->control.step_range_end
5106 || tp->control.trap_expected
5107 || tp->stepping_through_solib_after_catch);
5108 }
5109
5110 /* Inferior has stepped into a subroutine call with source code that
5111 we should not step over. Step to the first line of code in
5112 it. */
5113
5114 static void
5115 handle_step_into_function (struct gdbarch *gdbarch,
5116 struct execution_control_state *ecs)
5117 {
5118 struct symtab *s;
5119 struct symtab_and_line stop_func_sal, sr_sal;
5120
5121 s = find_pc_symtab (stop_pc);
5122 if (s && s->language != language_asm)
5123 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5124 ecs->stop_func_start);
5125
5126 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5127 /* Use the step-resume breakpoint to step until the end of the prologue,
5128 even if that involves jumps (as it seems to on the vax under
5129 4.2). */
5130 /* If the prologue ends in the middle of a source line, continue to
5131 the end of that source line (if it is still within the function).
5132 Otherwise, just go to end of prologue. */
5133 if (stop_func_sal.end
5134 && stop_func_sal.pc != ecs->stop_func_start
5135 && stop_func_sal.end < ecs->stop_func_end)
5136 ecs->stop_func_start = stop_func_sal.end;
5137
5138 /* Architectures which require breakpoint adjustment might not be able
5139 to place a breakpoint at the computed address. If so, the test
5140 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5141 ecs->stop_func_start to an address at which a breakpoint may be
5142 legitimately placed.
5143
5144 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5145 made, GDB will enter an infinite loop when stepping through
5146 optimized code consisting of VLIW instructions which contain
5147 subinstructions corresponding to different source lines. On
5148 FR-V, it's not permitted to place a breakpoint on any but the
5149 first subinstruction of a VLIW instruction. When a breakpoint is
5150 set, GDB will adjust the breakpoint address to the beginning of
5151 the VLIW instruction. Thus, we need to make the corresponding
5152 adjustment here when computing the stop address. */
5153
5154 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5155 {
5156 ecs->stop_func_start
5157 = gdbarch_adjust_breakpoint_address (gdbarch,
5158 ecs->stop_func_start);
5159 }
5160
5161 if (ecs->stop_func_start == stop_pc)
5162 {
5163 /* We are already there: stop now. */
5164 ecs->event_thread->control.stop_step = 1;
5165 print_end_stepping_range_reason ();
5166 stop_stepping (ecs);
5167 return;
5168 }
5169 else
5170 {
5171 /* Put the step-resume breakpoint there and run until we reach it. */
5172 init_sal (&sr_sal); /* initialize to zeroes */
5173 sr_sal.pc = ecs->stop_func_start;
5174 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5175 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5176
5177 /* Do not specify what the fp should be when we stop since on
5178 some machines the prologue is where the new fp value is
5179 established. */
5180 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5181
5182 /* And make sure stepping stops right away then. */
5183 ecs->event_thread->control.step_range_end
5184 = ecs->event_thread->control.step_range_start;
5185 }
5186 keep_going (ecs);
5187 }
5188
5189 /* Inferior has stepped backward into a subroutine call with source
5190 code that we should not step over. Step to the beginning of the
5191 last line of code in it. */
5192
5193 static void
5194 handle_step_into_function_backward (struct gdbarch *gdbarch,
5195 struct execution_control_state *ecs)
5196 {
5197 struct symtab *s;
5198 struct symtab_and_line stop_func_sal;
5199
5200 s = find_pc_symtab (stop_pc);
5201 if (s && s->language != language_asm)
5202 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5203 ecs->stop_func_start);
5204
5205 stop_func_sal = find_pc_line (stop_pc, 0);
5206
5207 /* OK, we're just going to keep stepping here. */
5208 if (stop_func_sal.pc == stop_pc)
5209 {
5210 /* We're there already. Just stop stepping now. */
5211 ecs->event_thread->control.stop_step = 1;
5212 print_end_stepping_range_reason ();
5213 stop_stepping (ecs);
5214 }
5215 else
5216 {
5217 /* Else just reset the step range and keep going.
5218 No step-resume breakpoint; they don't work for
5219 epilogues, which can have multiple entry paths. */
5220 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5221 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5222 keep_going (ecs);
5223 }
5224 return;
5225 }
5226
5227 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5228 This is used both to skip functions and to skip over code. */
5229
5230 static void
5231 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5232 struct symtab_and_line sr_sal,
5233 struct frame_id sr_id,
5234 enum bptype sr_type)
5235 {
5236 /* There should never be more than one step-resume or longjmp-resume
5237 breakpoint per thread, so we should never be setting a new
5238 step_resume_breakpoint when one is already active. */
5239 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5240 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5241
5242 if (debug_infrun)
5243 fprintf_unfiltered (gdb_stdlog,
5244 "infrun: inserting step-resume breakpoint at %s\n",
5245 paddress (gdbarch, sr_sal.pc));
5246
5247 inferior_thread ()->control.step_resume_breakpoint
5248 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5249 }
5250
5251 void
5252 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5253 struct symtab_and_line sr_sal,
5254 struct frame_id sr_id)
5255 {
5256 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5257 sr_sal, sr_id,
5258 bp_step_resume);
5259 }
5260
5261 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5262 This is used to skip a potential signal handler.
5263
5264 This is called with the interrupted function's frame. The signal
5265 handler, when it returns, will resume the interrupted function at
5266 RETURN_FRAME.pc. */
5267
5268 static void
5269 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5270 {
5271 struct symtab_and_line sr_sal;
5272 struct gdbarch *gdbarch;
5273
5274 gdb_assert (return_frame != NULL);
5275 init_sal (&sr_sal); /* initialize to zeros */
5276
5277 gdbarch = get_frame_arch (return_frame);
5278 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5279 sr_sal.section = find_pc_overlay (sr_sal.pc);
5280 sr_sal.pspace = get_frame_program_space (return_frame);
5281
5282 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5283 get_stack_frame_id (return_frame),
5284 bp_hp_step_resume);
5285 }
5286
5287 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5288 is used to skip a function after stepping into it (for "next" or if
5289 the called function has no debugging information).
5290
5291 The current function has almost always been reached by single
5292 stepping a call or return instruction. NEXT_FRAME belongs to the
5293 current function, and the breakpoint will be set at the caller's
5294 resume address.
5295
5296 This is a separate function rather than reusing
5297 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5298 get_prev_frame, which may stop prematurely (see the implementation
5299 of frame_unwind_caller_id for an example). */
5300
5301 static void
5302 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5303 {
5304 struct symtab_and_line sr_sal;
5305 struct gdbarch *gdbarch;
5306
5307 /* We shouldn't have gotten here if we don't know where the call site
5308 is. */
5309 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5310
5311 init_sal (&sr_sal); /* initialize to zeros */
5312
5313 gdbarch = frame_unwind_caller_arch (next_frame);
5314 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5315 frame_unwind_caller_pc (next_frame));
5316 sr_sal.section = find_pc_overlay (sr_sal.pc);
5317 sr_sal.pspace = frame_unwind_program_space (next_frame);
5318
5319 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5320 frame_unwind_caller_id (next_frame));
5321 }
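/* Illustration (not part of GDB; the caller source lines are hypothetical):
   suppose the inferior is stopped at a call such as

       foo ();        the call being stepped over
       bar = 1;       the next statement in the caller

   and a "next" single-steps into foo's first instruction.  We then get
   here with NEXT_FRAME being foo's frame; frame_unwind_caller_pc gives
   the return address in the caller (the instruction just after the
   call), and the step-resume breakpoint planted there lets foo run to
   completion without being single-stepped.  */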
5322
5323 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5324 new breakpoint at the target of a jmp_buf. The handling of
5325 longjmp-resume uses the same mechanisms used for handling
5326 "step-resume" breakpoints. */
5327
5328 static void
5329 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5330 {
5331 /* There should never be more than one step-resume or longjmp-resume
5332 breakpoint per thread, so we should never be setting a new
5333 longjmp_resume_breakpoint when one is already active. */
5334 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5335
5336 if (debug_infrun)
5337 fprintf_unfiltered (gdb_stdlog,
5338 "infrun: inserting longjmp-resume breakpoint at %s\n",
5339 paddress (gdbarch, pc));
5340
5341 inferior_thread ()->control.step_resume_breakpoint =
5342 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5343 }
5344
5345 /* Insert an exception resume breakpoint. TP is the thread throwing
5346 the exception. The block B is the block of the unwinder debug hook
5347 function. FRAME is the frame corresponding to the call to this
5348 function. SYM is the symbol of the function argument holding the
5349 target PC of the exception. */
5350
5351 static void
5352 insert_exception_resume_breakpoint (struct thread_info *tp,
5353 struct block *b,
5354 struct frame_info *frame,
5355 struct symbol *sym)
5356 {
5357 struct gdb_exception e;
5358
5359 /* We want to ignore errors here. */
5360 TRY_CATCH (e, RETURN_MASK_ERROR)
5361 {
5362 struct symbol *vsym;
5363 struct value *value;
5364 CORE_ADDR handler;
5365 struct breakpoint *bp;
5366
5367 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5368 value = read_var_value (vsym, frame);
5369 /* If the value was optimized out, revert to the old behavior. */
5370 if (! value_optimized_out (value))
5371 {
5372 handler = value_as_address (value);
5373
5374 if (debug_infrun)
5375 fprintf_unfiltered (gdb_stdlog,
5376 "infrun: exception resume at %lx\n",
5377 (unsigned long) handler);
5378
5379 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5380 handler, bp_exception_resume);
5381 bp->thread = tp->num;
5382 inferior_thread ()->control.exception_resume_breakpoint = bp;
5383 }
5384 }
5385 }
5386
5387 /* This is called when an exception has been intercepted. Check to
5388 see whether the exception's destination is of interest, and if so,
5389 set an exception resume breakpoint there. */
5390
5391 static void
5392 check_exception_resume (struct execution_control_state *ecs,
5393 struct frame_info *frame, struct symbol *func)
5394 {
5395 struct gdb_exception e;
5396
5397 TRY_CATCH (e, RETURN_MASK_ERROR)
5398 {
5399 struct block *b;
5400 struct dict_iterator iter;
5401 struct symbol *sym;
5402 int argno = 0;
5403
5404 /* The exception breakpoint is a thread-specific breakpoint on
5405 the unwinder's debug hook, declared as:
5406
5407 void _Unwind_DebugHook (void *cfa, void *handler);
5408
5409 The CFA argument indicates the frame to which control is
5410 about to be transferred. HANDLER is the destination PC.
5411
5412 We ignore the CFA and set a temporary breakpoint at HANDLER.
5413 This is not extremely efficient but it avoids issues in gdb
5414 with computing the DWARF CFA, and it also works even in weird
5415 cases such as throwing an exception from inside a signal
5416 handler. */
5417
5418 b = SYMBOL_BLOCK_VALUE (func);
5419 ALL_BLOCK_SYMBOLS (b, iter, sym)
5420 {
5421 if (!SYMBOL_IS_ARGUMENT (sym))
5422 continue;
5423
5424 if (argno == 0)
5425 ++argno;
5426 else
5427 {
5428 insert_exception_resume_breakpoint (ecs->event_thread,
5429 b, frame, sym);
5430 break;
5431 }
5432 }
5433 }
5434 }
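/* Worked illustration of the loop above (a sketch tied to the hook
   declaration quoted in the comment):

       void _Unwind_DebugHook (void *cfa, void *handler);

   The first argument symbol (CFA) is skipped while ARGNO is 0, and the
   second (HANDLER) is handed to insert_exception_resume_breakpoint,
   which reads its value in FRAME and plants a momentary
   bp_exception_resume breakpoint at that PC so that stepping over a
   throw can land in the handler.  */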
5435
5436 static void
5437 stop_stepping (struct execution_control_state *ecs)
5438 {
5439 if (debug_infrun)
5440 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5441
5442 /* Let callers know we don't want to wait for the inferior anymore. */
5443 ecs->wait_some_more = 0;
5444 }
5445
5446 /* This function handles various cases where we need to continue
5447 waiting for the inferior. */
5448 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5449
5450 static void
5451 keep_going (struct execution_control_state *ecs)
5452 {
5453 /* Make sure normal_stop is called if we get a QUIT handled before
5454 reaching resume. */
5455 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5456
5457 /* Save the pc before execution, to compare with pc after stop. */
5458 ecs->event_thread->prev_pc
5459 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5460
5461 /* If we did not do break;, it means we should keep running the
5462 inferior and not return to the debugger. */
5463
5464 if (ecs->event_thread->control.trap_expected
5465 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5466 {
5467 /* We took a signal (which we are supposed to pass through to
5468 the inferior, else we'd not get here) and we haven't yet
5469 gotten our trap. Simply continue. */
5470
5471 discard_cleanups (old_cleanups);
5472 resume (currently_stepping (ecs->event_thread),
5473 ecs->event_thread->suspend.stop_signal);
5474 }
5475 else
5476 {
5477 /* Either the trap was not expected, but we are continuing
5478 anyway (the user asked that this signal be passed to the
5479 child)
5480 -- or --
5481 The signal was SIGTRAP, i.e. it was our signal, but we
5482 decided we should resume from it.
5483
5484 We're going to run this baby now!
5485
5486 Note that insert_breakpoints won't try to re-insert
5487 already inserted breakpoints. Therefore, we don't
5488 care if breakpoints were already inserted, or not. */
5489
5490 if (ecs->event_thread->stepping_over_breakpoint)
5491 {
5492 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5493
5494 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5495 /* Since we can't do a displaced step, we have to remove
5496 the breakpoint while we step over it. To keep things
5497 simple, we remove them all. */
5498 remove_breakpoints ();
5499 }
5500 else
5501 {
5502 struct gdb_exception e;
5503
5504 /* Stop stepping when inserting breakpoints
5505 has failed. */
5506 TRY_CATCH (e, RETURN_MASK_ERROR)
5507 {
5508 insert_breakpoints ();
5509 }
5510 if (e.reason < 0)
5511 {
5512 exception_print (gdb_stderr, e);
5513 stop_stepping (ecs);
5514 return;
5515 }
5516 }
5517
5518 ecs->event_thread->control.trap_expected
5519 = ecs->event_thread->stepping_over_breakpoint;
5520
5521 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5522 specifies that such a signal should be delivered to the
5523 target program).
5524
5525 Typically, this would occur when a user is debugging a
5526 target monitor on a simulator: the target monitor sets a
5527 breakpoint; the simulator encounters this breakpoint and
5528 halts the simulation, handing control to GDB; GDB, noting
5529 that the breakpoint isn't valid, returns control back to the
5530 simulator; the simulator then delivers the hardware
5531 equivalent of a SIGNAL_TRAP to the program being debugged. */
5532
5533 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5534 && !signal_program[ecs->event_thread->suspend.stop_signal])
5535 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5536
5537 discard_cleanups (old_cleanups);
5538 resume (currently_stepping (ecs->event_thread),
5539 ecs->event_thread->suspend.stop_signal);
5540 }
5541
5542 prepare_to_wait (ecs);
5543 }
5544
5545 /* This function normally comes after a resume, before
5546 handle_inferior_event exits. It takes care of any last bits of
5547 housekeeping, and sets the all-important wait_some_more flag. */
5548
5549 static void
5550 prepare_to_wait (struct execution_control_state *ecs)
5551 {
5552 if (debug_infrun)
5553 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5554
5555 /* This is the old end of the while loop. Let everybody know we
5556 want to wait for the inferior some more and get called again
5557 soon. */
5558 ecs->wait_some_more = 1;
5559 }
5560
5561 /* Several print_*_reason functions to print why the inferior has stopped.
5562 We always print something when the inferior exits, or receives a signal.
5563 The rest of the cases are dealt with later on in normal_stop and
5564 print_it_typical. Ideally there should be a call to one of these
5565 print_*_reason functions from handle_inferior_event each time
5566 stop_stepping is called. */
5567
5568 /* Print why the inferior has stopped.
5569 We are done with a step/next/si/ni command. For now print nothing on
5570 the CLI; emit an MI "reason" field only if not in the middle of doing
5571 a "step n" operation for n > 1. */
5572
5573 static void
5574 print_end_stepping_range_reason (void)
5575 {
5576 if ((!inferior_thread ()->step_multi
5577 || !inferior_thread ()->control.stop_step)
5578 && ui_out_is_mi_like_p (uiout))
5579 ui_out_field_string (uiout, "reason",
5580 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5581 }
5582
5583 /* The inferior was terminated by a signal, print why it stopped. */
5584
5585 static void
5586 print_signal_exited_reason (enum target_signal siggnal)
5587 {
5588 annotate_signalled ();
5589 if (ui_out_is_mi_like_p (uiout))
5590 ui_out_field_string
5591 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5592 ui_out_text (uiout, "\nProgram terminated with signal ");
5593 annotate_signal_name ();
5594 ui_out_field_string (uiout, "signal-name",
5595 target_signal_to_name (siggnal));
5596 annotate_signal_name_end ();
5597 ui_out_text (uiout, ", ");
5598 annotate_signal_string ();
5599 ui_out_field_string (uiout, "signal-meaning",
5600 target_signal_to_string (siggnal));
5601 annotate_signal_string_end ();
5602 ui_out_text (uiout, ".\n");
5603 ui_out_text (uiout, "The program no longer exists.\n");
5604 }
5605
5606 /* The inferior program is finished, print why it stopped. */
5607
5608 static void
5609 print_exited_reason (int exitstatus)
5610 {
5611 struct inferior *inf = current_inferior ();
5612 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5613
5614 annotate_exited (exitstatus);
5615 if (exitstatus)
5616 {
5617 if (ui_out_is_mi_like_p (uiout))
5618 ui_out_field_string (uiout, "reason",
5619 async_reason_lookup (EXEC_ASYNC_EXITED));
5620 ui_out_text (uiout, "[Inferior ");
5621 ui_out_text (uiout, plongest (inf->num));
5622 ui_out_text (uiout, " (");
5623 ui_out_text (uiout, pidstr);
5624 ui_out_text (uiout, ") exited with code ");
5625 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5626 ui_out_text (uiout, "]\n");
5627 }
5628 else
5629 {
5630 if (ui_out_is_mi_like_p (uiout))
5631 ui_out_field_string
5632 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5633 ui_out_text (uiout, "[Inferior ");
5634 ui_out_text (uiout, plongest (inf->num));
5635 ui_out_text (uiout, " (");
5636 ui_out_text (uiout, pidstr);
5637 ui_out_text (uiout, ") exited normally]\n");
5638 }
5639 /* Support the --return-child-result option. */
5640 return_child_result_value = exitstatus;
5641 }
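/* For illustration only (the inferior number and pid are hypothetical),
   the code above produces CLI output such as

       [Inferior 1 (process 1234) exited normally]
       [Inferior 1 (process 1234) exited with code 02]

   with the exit code printed in octal, plus a "reason" field when the
   interpreter is MI-like.  */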
5642
5643 /* Signal received; print why the inferior has stopped. The signal table
5644 tells us to print information about it. */
5645
5646 static void
5647 print_signal_received_reason (enum target_signal siggnal)
5648 {
5649 annotate_signal ();
5650
5651 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5652 {
5653 struct thread_info *t = inferior_thread ();
5654
5655 ui_out_text (uiout, "\n[");
5656 ui_out_field_string (uiout, "thread-name",
5657 target_pid_to_str (t->ptid));
5658 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5659 ui_out_text (uiout, " stopped");
5660 }
5661 else
5662 {
5663 ui_out_text (uiout, "\nProgram received signal ");
5664 annotate_signal_name ();
5665 if (ui_out_is_mi_like_p (uiout))
5666 ui_out_field_string
5667 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5668 ui_out_field_string (uiout, "signal-name",
5669 target_signal_to_name (siggnal));
5670 annotate_signal_name_end ();
5671 ui_out_text (uiout, ", ");
5672 annotate_signal_string ();
5673 ui_out_field_string (uiout, "signal-meaning",
5674 target_signal_to_string (siggnal));
5675 annotate_signal_string_end ();
5676 }
5677 ui_out_text (uiout, ".\n");
5678 }
5679
5680 /* Reverse execution: target ran out of history info, print why the inferior
5681 has stopped. */
5682
5683 static void
5684 print_no_history_reason (void)
5685 {
5686 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5687 }
5688
5689 /* Here to return control to GDB when the inferior stops for real.
5690 Print appropriate messages, remove breakpoints, give terminal our modes.
5691
5692 The global stop_print_frame, when nonzero, means print the executing
5693 frame (pc, function, args, file, line number and line text). */
5696
5697 void
5698 normal_stop (void)
5699 {
5700 struct target_waitstatus last;
5701 ptid_t last_ptid;
5702 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5703
5704 get_last_target_status (&last_ptid, &last);
5705
5706 /* If an exception is thrown from this point on, make sure to
5707 propagate GDB's knowledge of the executing state to the
5708 frontend/user running state. A QUIT is an easy exception to see
5709 here, so do this before any filtered output. */
5710 if (!non_stop)
5711 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5712 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5713 && last.kind != TARGET_WAITKIND_EXITED)
5714 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5715
5716 /* In non-stop mode, we don't want GDB to switch threads behind the
5717 user's back, to avoid races where the user is typing a command to
5718 apply to thread x, but GDB switches to thread y before the user
5719 finishes entering the command. */
5720
5721 /* As with the notification of thread events, we want to delay
5722 notifying the user that we've switched thread context until
5723 the inferior actually stops.
5724
5725 There's no point in saying anything if the inferior has exited.
5726 Note that SIGNALLED here means "exited with a signal", not
5727 "received a signal". */
5728 if (!non_stop
5729 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5730 && target_has_execution
5731 && last.kind != TARGET_WAITKIND_SIGNALLED
5732 && last.kind != TARGET_WAITKIND_EXITED)
5733 {
5734 target_terminal_ours_for_output ();
5735 printf_filtered (_("[Switching to %s]\n"),
5736 target_pid_to_str (inferior_ptid));
5737 annotate_thread_changed ();
5738 previous_inferior_ptid = inferior_ptid;
5739 }
5740
5741 if (!breakpoints_always_inserted_mode () && target_has_execution)
5742 {
5743 if (remove_breakpoints ())
5744 {
5745 target_terminal_ours_for_output ();
5746 printf_filtered (_("Cannot remove breakpoints because "
5747 "program is no longer writable.\nFurther "
5748 "execution is probably impossible.\n"));
5749 }
5750 }
5751
5752 /* If an auto-display called a function and that got a signal,
5753 disable that auto-display to avoid an infinite recursion. */
5754
5755 if (stopped_by_random_signal)
5756 disable_current_display ();
5757
5758 /* Don't print a message if in the middle of doing a "step n"
5759 operation for n > 1 */
5760 if (target_has_execution
5761 && last.kind != TARGET_WAITKIND_SIGNALLED
5762 && last.kind != TARGET_WAITKIND_EXITED
5763 && inferior_thread ()->step_multi
5764 && inferior_thread ()->control.stop_step)
5765 goto done;
5766
5767 target_terminal_ours ();
5768
5769 /* Set the current source location. This will also happen if we
5770 display the frame below, but the current SAL will be incorrect
5771 during a user hook-stop function. */
5772 if (has_stack_frames () && !stop_stack_dummy)
5773 set_current_sal_from_frame (get_current_frame (), 1);
5774
5775 /* Let the user/frontend see the threads as stopped. */
5776 do_cleanups (old_chain);
5777
5778 /* Look up the hook_stop and run it (CLI internally handles problem
5779 of stop_command's pre-hook not existing). */
5780 if (stop_command)
5781 catch_errors (hook_stop_stub, stop_command,
5782 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5783
5784 if (!has_stack_frames ())
5785 goto done;
5786
5787 if (last.kind == TARGET_WAITKIND_SIGNALLED
5788 || last.kind == TARGET_WAITKIND_EXITED)
5789 goto done;
5790
5791 /* Select innermost stack frame - i.e., current frame is frame 0,
5792 and current location is based on that.
5793 Don't do this on return from a stack dummy routine,
5794 or if the program has exited. */
5795
5796 if (!stop_stack_dummy)
5797 {
5798 select_frame (get_current_frame ());
5799
5800 /* Print current location without a level number, if
5801 we have changed functions or hit a breakpoint.
5802 Print source line if we have one.
5803 bpstat_print() contains the logic deciding in detail
5804 what to print, based on the event(s) that just occurred. */
5805
5806 /* If --batch-silent is enabled then there's no need to print the current
5807 source location, and trying to do so risks causing an error message about
5808 missing source files. */
5809 if (stop_print_frame && !batch_silent)
5810 {
5811 int bpstat_ret;
5812 int source_flag;
5813 int do_frame_printing = 1;
5814 struct thread_info *tp = inferior_thread ();
5815
5816 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5817 switch (bpstat_ret)
5818 {
5819 case PRINT_UNKNOWN:
5820 /* If we had hit a shared library event breakpoint,
5821 bpstat_print would print out this message. If we hit
5822 an OS-level shared library event, do the same
5823 thing. */
5824 if (last.kind == TARGET_WAITKIND_LOADED)
5825 {
5826 printf_filtered (_("Stopped due to shared library event\n"));
5827 source_flag = SRC_LINE; /* something bogus */
5828 do_frame_printing = 0;
5829 break;
5830 }
5831
5832 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5833 (or should) carry around the function and does (or
5834 should) use that when doing a frame comparison. */
5835 if (tp->control.stop_step
5836 && frame_id_eq (tp->control.step_frame_id,
5837 get_frame_id (get_current_frame ()))
5838 && step_start_function == find_pc_function (stop_pc))
5839 source_flag = SRC_LINE; /* Finished step, just
5840 print source line. */
5841 else
5842 source_flag = SRC_AND_LOC; /* Print location and
5843 source line. */
5844 break;
5845 case PRINT_SRC_AND_LOC:
5846 source_flag = SRC_AND_LOC; /* Print location and
5847 source line. */
5848 break;
5849 case PRINT_SRC_ONLY:
5850 source_flag = SRC_LINE;
5851 break;
5852 case PRINT_NOTHING:
5853 source_flag = SRC_LINE; /* something bogus */
5854 do_frame_printing = 0;
5855 break;
5856 default:
5857 internal_error (__FILE__, __LINE__, _("Unknown value."));
5858 }
5859
5860 /* The behavior of this routine with respect to the source
5861 flag is:
5862 SRC_LINE: Print only source line
5863 LOCATION: Print only location
5864 SRC_AND_LOC: Print location and source line. */
5865 if (do_frame_printing)
5866 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5867
5868 /* Display the auto-display expressions. */
5869 do_displays ();
5870 }
5871 }
5872
5873 /* Save the function return value registers, if we care.
5874 We might be about to restore their previous contents. */
5875 if (inferior_thread ()->control.proceed_to_finish
5876 && execution_direction != EXEC_REVERSE)
5877 {
5878 /* This should not be necessary. */
5879 if (stop_registers)
5880 regcache_xfree (stop_registers);
5881
5882 /* NB: The copy goes through to the target picking up the value of
5883 all the registers. */
5884 stop_registers = regcache_dup (get_current_regcache ());
5885 }
5886
5887 if (stop_stack_dummy == STOP_STACK_DUMMY)
5888 {
5889 /* Pop the empty frame that contains the stack dummy.
5890 This also restores inferior state prior to the call
5891 (struct infcall_suspend_state). */
5892 struct frame_info *frame = get_current_frame ();
5893
5894 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5895 frame_pop (frame);
5896 /* frame_pop() calls reinit_frame_cache as the last thing it
5897 does which means there's currently no selected frame. We
5898 don't need to re-establish a selected frame if the dummy call
5899 returns normally, that will be done by
5900 restore_infcall_control_state. However, we do have to handle
5901 the case where the dummy call is returning after being
5902 stopped (e.g. the dummy call previously hit a breakpoint).
5903 We can't know which case we have so just always re-establish
5904 a selected frame here. */
5905 select_frame (get_current_frame ());
5906 }
5907
5908 done:
5909 annotate_stopped ();
5910
5911 /* Suppress the stop observer if we're in the middle of:
5912
5913 - a step n (n > 1), as there are still more steps to be done.
5914
5915 - a "finish" command, as the observer will be called in
5916 finish_command_continuation, so it can include the inferior
5917 function's return value.
5918
5919 - calling an inferior function, as we pretend the inferior didn't
5920 run at all. The return value of the call is handled by the
5921 expression evaluator, through call_function_by_hand. */
5922
5923 if (!target_has_execution
5924 || last.kind == TARGET_WAITKIND_SIGNALLED
5925 || last.kind == TARGET_WAITKIND_EXITED
5926 || (!inferior_thread ()->step_multi
5927 && !(inferior_thread ()->control.stop_bpstat
5928 && inferior_thread ()->control.proceed_to_finish)
5929 && !inferior_thread ()->control.in_infcall))
5930 {
5931 if (!ptid_equal (inferior_ptid, null_ptid))
5932 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
5933 stop_print_frame);
5934 else
5935 observer_notify_normal_stop (NULL, stop_print_frame);
5936 }
5937
5938 if (target_has_execution)
5939 {
5940 if (last.kind != TARGET_WAITKIND_SIGNALLED
5941 && last.kind != TARGET_WAITKIND_EXITED)
5942 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5943 Delete any breakpoint that is to be deleted at the next stop. */
5944 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
5945 }
5946
5947 /* Try to get rid of automatically added inferiors that are no
5948 longer needed. Keeping those around slows things down linearly.
5949 Note that this never removes the current inferior. */
5950 prune_inferiors ();
5951 }
5952
5953 static int
5954 hook_stop_stub (void *cmd)
5955 {
5956 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5957 return (0);
5958 }
5959 \f
5960 int
5961 signal_stop_state (int signo)
5962 {
5963 return signal_stop[signo];
5964 }
5965
5966 int
5967 signal_print_state (int signo)
5968 {
5969 return signal_print[signo];
5970 }
5971
5972 int
5973 signal_pass_state (int signo)
5974 {
5975 return signal_program[signo];
5976 }
5977
5978 static void
5979 signal_cache_update (int signo)
5980 {
5981 if (signo == -1)
5982 {
5983 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
5984 signal_cache_update (signo);
5985
5986 return;
5987 }
5988
5989 signal_pass[signo] = (signal_stop[signo] == 0
5990 && signal_print[signo] == 0
5991 && signal_program[signo] == 1);
5992 }
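/* Worked example (the signal choice is illustrative): after
   "handle SIGPIPE nostop noprint pass", signal_stop and signal_print
   for SIGPIPE are 0 while signal_program is 1, so signal_pass becomes 1
   and the signal can be passed straight to the inferior by the target
   (see the target_pass_signals call in handle_command below); any
   signal that stops or prints keeps signal_pass at 0, so GDB gets to
   see it first.  */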
5993
5994 int
5995 signal_stop_update (int signo, int state)
5996 {
5997 int ret = signal_stop[signo];
5998
5999 signal_stop[signo] = state;
6000 signal_cache_update (signo);
6001 return ret;
6002 }
6003
6004 int
6005 signal_print_update (int signo, int state)
6006 {
6007 int ret = signal_print[signo];
6008
6009 signal_print[signo] = state;
6010 signal_cache_update (signo);
6011 return ret;
6012 }
6013
6014 int
6015 signal_pass_update (int signo, int state)
6016 {
6017 int ret = signal_program[signo];
6018
6019 signal_program[signo] = state;
6020 signal_cache_update (signo);
6021 return ret;
6022 }
6023
6024 static void
6025 sig_print_header (void)
6026 {
6027 printf_filtered (_("Signal Stop\tPrint\tPass "
6028 "to program\tDescription\n"));
6029 }
6030
6031 static void
6032 sig_print_info (enum target_signal oursig)
6033 {
6034 const char *name = target_signal_to_name (oursig);
6035 int name_padding = 13 - strlen (name);
6036
6037 if (name_padding <= 0)
6038 name_padding = 0;
6039
6040 printf_filtered ("%s", name);
6041 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6042 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6043 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6044 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6045 printf_filtered ("%s\n", target_signal_to_string (oursig));
6046 }
6047
6048 /* Specify how various signals in the inferior should be handled. */
6049
6050 static void
6051 handle_command (char *args, int from_tty)
6052 {
6053 char **argv;
6054 int digits, wordlen;
6055 int sigfirst, signum, siglast;
6056 enum target_signal oursig;
6057 int allsigs;
6058 int nsigs;
6059 unsigned char *sigs;
6060 struct cleanup *old_chain;
6061
6062 if (args == NULL)
6063 {
6064 error_no_arg (_("signal to handle"));
6065 }
6066
6067 /* Allocate and zero an array of flags for which signals to handle. */
6068
6069 nsigs = (int) TARGET_SIGNAL_LAST;
6070 sigs = (unsigned char *) alloca (nsigs);
6071 memset (sigs, 0, nsigs);
6072
6073 /* Break the command line up into args. */
6074
6075 argv = gdb_buildargv (args);
6076 old_chain = make_cleanup_freeargv (argv);
6077
6078 /* Walk through the args, looking for signal numbers, signal names, and
6079 actions. Signal numbers and signal names may be interspersed with
6080 actions, with the actions being performed for all signals cumulatively
6081 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6082
6083 while (*argv != NULL)
6084 {
6085 wordlen = strlen (*argv);
6086 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6087 {;
6088 }
6089 allsigs = 0;
6090 sigfirst = siglast = -1;
6091
6092 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6093 {
6094 /* Apply action to all signals except those used by the
6095 debugger. Silently skip those. */
6096 allsigs = 1;
6097 sigfirst = 0;
6098 siglast = nsigs - 1;
6099 }
6100 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6101 {
6102 SET_SIGS (nsigs, sigs, signal_stop);
6103 SET_SIGS (nsigs, sigs, signal_print);
6104 }
6105 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6106 {
6107 UNSET_SIGS (nsigs, sigs, signal_program);
6108 }
6109 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6110 {
6111 SET_SIGS (nsigs, sigs, signal_print);
6112 }
6113 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6114 {
6115 SET_SIGS (nsigs, sigs, signal_program);
6116 }
6117 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6118 {
6119 UNSET_SIGS (nsigs, sigs, signal_stop);
6120 }
6121 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6122 {
6123 SET_SIGS (nsigs, sigs, signal_program);
6124 }
6125 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6126 {
6127 UNSET_SIGS (nsigs, sigs, signal_print);
6128 UNSET_SIGS (nsigs, sigs, signal_stop);
6129 }
6130 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6131 {
6132 UNSET_SIGS (nsigs, sigs, signal_program);
6133 }
6134 else if (digits > 0)
6135 {
6136 /* It is numeric. The numeric signal refers to our own
6137 internal signal numbering from target.h, not to a host/target
6138 signal number. This is a feature; users really should be
6139 using symbolic names anyway, and the common ones like
6140 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6141
6142 sigfirst = siglast = (int)
6143 target_signal_from_command (atoi (*argv));
6144 if ((*argv)[digits] == '-')
6145 {
6146 siglast = (int)
6147 target_signal_from_command (atoi ((*argv) + digits + 1));
6148 }
6149 if (sigfirst > siglast)
6150 {
6151 /* Bet he didn't figure we'd think of this case... */
6152 signum = sigfirst;
6153 sigfirst = siglast;
6154 siglast = signum;
6155 }
6156 }
6157 else
6158 {
6159 oursig = target_signal_from_name (*argv);
6160 if (oursig != TARGET_SIGNAL_UNKNOWN)
6161 {
6162 sigfirst = siglast = (int) oursig;
6163 }
6164 else
6165 {
6166 /* Not a number and not a recognized flag word => complain. */
6167 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6168 }
6169 }
6170
6171 /* If any signal numbers or symbol names were found, set flags for
6172 which signals to apply actions to. */
6173
6174 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6175 {
6176 switch ((enum target_signal) signum)
6177 {
6178 case TARGET_SIGNAL_TRAP:
6179 case TARGET_SIGNAL_INT:
6180 if (!allsigs && !sigs[signum])
6181 {
6182 if (query (_("%s is used by the debugger.\n\
6183 Are you sure you want to change it? "),
6184 target_signal_to_name ((enum target_signal) signum)))
6185 {
6186 sigs[signum] = 1;
6187 }
6188 else
6189 {
6190 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6191 gdb_flush (gdb_stdout);
6192 }
6193 }
6194 break;
6195 case TARGET_SIGNAL_0:
6196 case TARGET_SIGNAL_DEFAULT:
6197 case TARGET_SIGNAL_UNKNOWN:
6198 /* Make sure that "all" doesn't print these. */
6199 break;
6200 default:
6201 sigs[signum] = 1;
6202 break;
6203 }
6204 }
6205
6206 argv++;
6207 }
6208
6209 for (signum = 0; signum < nsigs; signum++)
6210 if (sigs[signum])
6211 {
6212 signal_cache_update (-1);
6213 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6214
6215 if (from_tty)
6216 {
6217 /* Show the results. */
6218 sig_print_header ();
6219 for (; signum < nsigs; signum++)
6220 if (sigs[signum])
6221 sig_print_info (signum);
6222 }
6223
6224 break;
6225 }
6226
6227 do_cleanups (old_chain);
6228 }
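/* Example session (illustrative; the description text comes from
   target_signal_to_string and the exact column layout from
   sig_print_header/sig_print_info above, so both may look different):

       (gdb) handle SIGUSR1 nostop noprint pass
       Signal        Stop      Print   Pass to program  Description
       SIGUSR1       No        No      Yes              User defined signal 1

   Actions accumulate across the arguments, and numeric ranges such as
   "handle 14-15 nostop" use GDB's internal signal numbers as described
   in the comment above.  */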
6229
6230 static void
6231 xdb_handle_command (char *args, int from_tty)
6232 {
6233 char **argv;
6234 struct cleanup *old_chain;
6235
6236 if (args == NULL)
6237 error_no_arg (_("xdb command"));
6238
6239 /* Break the command line up into args. */
6240
6241 argv = gdb_buildargv (args);
6242 old_chain = make_cleanup_freeargv (argv);
6243 if (argv[1] != (char *) NULL)
6244 {
6245 char *argBuf;
6246 int bufLen;
6247
6248 bufLen = strlen (argv[0]) + 20;
6249 argBuf = (char *) xmalloc (bufLen);
6250 if (argBuf)
6251 {
6252 int validFlag = 1;
6253 enum target_signal oursig;
6254
6255 oursig = target_signal_from_name (argv[0]);
6256 memset (argBuf, 0, bufLen);
6257 if (strcmp (argv[1], "Q") == 0)
6258 sprintf (argBuf, "%s %s", argv[0], "noprint");
6259 else
6260 {
6261 if (strcmp (argv[1], "s") == 0)
6262 {
6263 if (!signal_stop[oursig])
6264 sprintf (argBuf, "%s %s", argv[0], "stop");
6265 else
6266 sprintf (argBuf, "%s %s", argv[0], "nostop");
6267 }
6268 else if (strcmp (argv[1], "i") == 0)
6269 {
6270 if (!signal_program[oursig])
6271 sprintf (argBuf, "%s %s", argv[0], "pass");
6272 else
6273 sprintf (argBuf, "%s %s", argv[0], "nopass");
6274 }
6275 else if (strcmp (argv[1], "r") == 0)
6276 {
6277 if (!signal_print[oursig])
6278 sprintf (argBuf, "%s %s", argv[0], "print");
6279 else
6280 sprintf (argBuf, "%s %s", argv[0], "noprint");
6281 }
6282 else
6283 validFlag = 0;
6284 }
6285 if (validFlag)
6286 handle_command (argBuf, from_tty);
6287 else
6288 printf_filtered (_("Invalid signal handling flag.\n"));
6289 if (argBuf)
6290 xfree (argBuf);
6291 }
6292 }
6293 do_cleanups (old_chain);
6294 }
6295
6296 /* Print current contents of the tables set by the handle command.
6297 It is possible we should just be printing signals actually used
6298 by the current target (but for things to work right when switching
6299 targets, all signals should be in the signal tables). */
6300
6301 static void
6302 signals_info (char *signum_exp, int from_tty)
6303 {
6304 enum target_signal oursig;
6305
6306 sig_print_header ();
6307
6308 if (signum_exp)
6309 {
6310 /* First see if this is a symbol name. */
6311 oursig = target_signal_from_name (signum_exp);
6312 if (oursig == TARGET_SIGNAL_UNKNOWN)
6313 {
6314 /* No, try numeric. */
6315 oursig =
6316 target_signal_from_command (parse_and_eval_long (signum_exp));
6317 }
6318 sig_print_info (oursig);
6319 return;
6320 }
6321
6322 printf_filtered ("\n");
6323 /* These ugly casts brought to you by the native VAX compiler. */
6324 for (oursig = TARGET_SIGNAL_FIRST;
6325 (int) oursig < (int) TARGET_SIGNAL_LAST;
6326 oursig = (enum target_signal) ((int) oursig + 1))
6327 {
6328 QUIT;
6329
6330 if (oursig != TARGET_SIGNAL_UNKNOWN
6331 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6332 sig_print_info (oursig);
6333 }
6334
6335 printf_filtered (_("\nUse the \"handle\" command "
6336 "to change these tables.\n"));
6337 }
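/* For example, "info signals SIGINT" prints the header plus the single
   SIGINT row, while "info signals" with no argument walks the whole
   table, skipping the internal 0/DEFAULT/UNKNOWN entries (illustration
   only; the exact spacing comes from sig_print_info above).  */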
6338
6339 /* The $_siginfo convenience variable is a bit special. We don't know
6340 for sure the type of the value until we actually have a chance to
6341 fetch the data. The type can change depending on gdbarch, so it is
6342 also dependent on which thread you have selected. This is handled by:
6343
6344 1. making $_siginfo be an internalvar that creates a new value on
6345 access.
6346
6347 2. making the value of $_siginfo be an lval_computed value. */
6348
6349 /* This function implements the lval_computed support for reading a
6350 $_siginfo value. */
6351
6352 static void
6353 siginfo_value_read (struct value *v)
6354 {
6355 LONGEST transferred;
6356
6357 transferred =
6358 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6359 NULL,
6360 value_contents_all_raw (v),
6361 value_offset (v),
6362 TYPE_LENGTH (value_type (v)));
6363
6364 if (transferred != TYPE_LENGTH (value_type (v)))
6365 error (_("Unable to read siginfo"));
6366 }
6367
6368 /* This function implements the lval_computed support for writing a
6369 $_siginfo value. */
6370
6371 static void
6372 siginfo_value_write (struct value *v, struct value *fromval)
6373 {
6374 LONGEST transferred;
6375
6376 transferred = target_write (&current_target,
6377 TARGET_OBJECT_SIGNAL_INFO,
6378 NULL,
6379 value_contents_all_raw (fromval),
6380 value_offset (v),
6381 TYPE_LENGTH (value_type (fromval)));
6382
6383 if (transferred != TYPE_LENGTH (value_type (fromval)))
6384 error (_("Unable to write siginfo"));
6385 }
6386
6387 static struct lval_funcs siginfo_value_funcs =
6388 {
6389 siginfo_value_read,
6390 siginfo_value_write
6391 };
6392
6393 /* Return a new value with the correct type for the siginfo object of
6394 the current thread using architecture GDBARCH. Return a void value
6395 if there's no object available. */
6396
6397 static struct value *
6398 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6399 {
6400 if (target_has_stack
6401 && !ptid_equal (inferior_ptid, null_ptid)
6402 && gdbarch_get_siginfo_type_p (gdbarch))
6403 {
6404 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6405
6406 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6407 }
6408
6409 return allocate_value (builtin_type (gdbarch)->builtin_void);
6410 }
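/* Usage illustration (GNU/Linux shown as an example; the field names
   come from that gdbarch's siginfo type and may differ elsewhere):

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr
       (gdb) set $_siginfo.si_errno = 0

   Reads go through siginfo_value_read, writes through
   siginfo_value_write, and when there is no stack or no siginfo type
   the variable evaluates to void (see siginfo_make_value above).  */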
6411
6412 \f
6413 /* infcall_suspend_state contains state about the program itself like its
6414 registers and any signal it received when it last stopped.
6415 This state must be restored regardless of how the inferior function call
6416 ends (either successfully, or after it hits a breakpoint or signal)
6417 if the program is to properly continue where it left off. */
6418
6419 struct infcall_suspend_state
6420 {
6421 struct thread_suspend_state thread_suspend;
6422 struct inferior_suspend_state inferior_suspend;
6423
6424 /* Other fields: */
6425 CORE_ADDR stop_pc;
6426 struct regcache *registers;
6427
6428 /* Format of SIGINFO_DATA or NULL if it is not present. */
6429 struct gdbarch *siginfo_gdbarch;
6430
6431 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6432 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
6433 content would be invalid. */
6434 gdb_byte *siginfo_data;
6435 };
6436
6437 struct infcall_suspend_state *
6438 save_infcall_suspend_state (void)
6439 {
6440 struct infcall_suspend_state *inf_state;
6441 struct thread_info *tp = inferior_thread ();
6442 struct inferior *inf = current_inferior ();
6443 struct regcache *regcache = get_current_regcache ();
6444 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6445 gdb_byte *siginfo_data = NULL;
6446
6447 if (gdbarch_get_siginfo_type_p (gdbarch))
6448 {
6449 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6450 size_t len = TYPE_LENGTH (type);
6451 struct cleanup *back_to;
6452
6453 siginfo_data = xmalloc (len);
6454 back_to = make_cleanup (xfree, siginfo_data);
6455
6456 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6457 siginfo_data, 0, len) == len)
6458 discard_cleanups (back_to);
6459 else
6460 {
6461 /* Errors ignored. */
6462 do_cleanups (back_to);
6463 siginfo_data = NULL;
6464 }
6465 }
6466
6467 inf_state = XZALLOC (struct infcall_suspend_state);
6468
6469 if (siginfo_data)
6470 {
6471 inf_state->siginfo_gdbarch = gdbarch;
6472 inf_state->siginfo_data = siginfo_data;
6473 }
6474
6475 inf_state->thread_suspend = tp->suspend;
6476 inf_state->inferior_suspend = inf->suspend;
6477
6478 /* run_inferior_call will not use the signal due to its `proceed' call with
6479 TARGET_SIGNAL_0 anyway. */
6480 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6481
6482 inf_state->stop_pc = stop_pc;
6483
6484 inf_state->registers = regcache_dup (regcache);
6485
6486 return inf_state;
6487 }
6488
6489 /* Restore inferior session state to INF_STATE. */
6490
6491 void
6492 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6493 {
6494 struct thread_info *tp = inferior_thread ();
6495 struct inferior *inf = current_inferior ();
6496 struct regcache *regcache = get_current_regcache ();
6497 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6498
6499 tp->suspend = inf_state->thread_suspend;
6500 inf->suspend = inf_state->inferior_suspend;
6501
6502 stop_pc = inf_state->stop_pc;
6503
6504 if (inf_state->siginfo_gdbarch == gdbarch)
6505 {
6506 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6507 size_t len = TYPE_LENGTH (type);
6508
6509 /* Errors ignored. */
6510 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6511 inf_state->siginfo_data, 0, len);
6512 }
6513
6514 /* The inferior can be gone if the user types "print exit(0)"
6515 (and perhaps other times). */
6516 if (target_has_execution)
6517 /* NB: The register write goes through to the target. */
6518 regcache_cpy (regcache, inf_state->registers);
6519
6520 discard_infcall_suspend_state (inf_state);
6521 }
6522
6523 static void
6524 do_restore_infcall_suspend_state_cleanup (void *state)
6525 {
6526 restore_infcall_suspend_state (state);
6527 }
6528
6529 struct cleanup *
6530 make_cleanup_restore_infcall_suspend_state
6531 (struct infcall_suspend_state *inf_state)
6532 {
6533 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6534 }
6535
6536 void
6537 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6538 {
6539 regcache_xfree (inf_state->registers);
6540 xfree (inf_state->siginfo_data);
6541 xfree (inf_state);
6542 }
6543
6544 struct regcache *
6545 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6546 {
6547 return inf_state->registers;
6548 }
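/* Usage sketch (illustration only; real callers in the infcall code
   sequence this with additional bookkeeping):

       struct infcall_suspend_state *saved = save_infcall_suspend_state ();
       struct cleanup *old_chain
         = make_cleanup_restore_infcall_suspend_state (saved);

       ... let the inferior run ...

       do_cleanups (old_chain);

   Running the cleanup calls restore_infcall_suspend_state, which writes
   the saved registers and siginfo back to the target and then frees
   SAVED, so no separate discard_infcall_suspend_state call is needed on
   that path.  */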
6549
6550 /* infcall_control_state contains state regarding gdb's control of the
6551 inferior itself like stepping control. It also contains session state like
6552 the user's currently selected frame. */
6553
6554 struct infcall_control_state
6555 {
6556 struct thread_control_state thread_control;
6557 struct inferior_control_state inferior_control;
6558
6559 /* Other fields: */
6560 enum stop_stack_kind stop_stack_dummy;
6561 int stopped_by_random_signal;
6562 int stop_after_trap;
6563
6564 /* ID of the selected frame when the inferior function call was made. */
6565 struct frame_id selected_frame_id;
6566 };
6567
6568 /* Save all of the information associated with the inferior<==>gdb
6569 connection. */
6570
6571 struct infcall_control_state *
6572 save_infcall_control_state (void)
6573 {
6574 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6575 struct thread_info *tp = inferior_thread ();
6576 struct inferior *inf = current_inferior ();
6577
6578 inf_status->thread_control = tp->control;
6579 inf_status->inferior_control = inf->control;
6580
6581 tp->control.step_resume_breakpoint = NULL;
6582 tp->control.exception_resume_breakpoint = NULL;
6583
6584 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6585 chain. If caller's caller is walking the chain, they'll be happier if we
6586 hand them back the original chain when restore_infcall_control_state is
6587 called. */
6588 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6589
6590 /* Other fields: */
6591 inf_status->stop_stack_dummy = stop_stack_dummy;
6592 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6593 inf_status->stop_after_trap = stop_after_trap;
6594
6595 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6596
6597 return inf_status;
6598 }
6599
6600 static int
6601 restore_selected_frame (void *args)
6602 {
6603 struct frame_id *fid = (struct frame_id *) args;
6604 struct frame_info *frame;
6605
6606 frame = frame_find_by_id (*fid);
6607
6608 /* frame_find_by_id can fail if the frame with the saved ID no longer
6609 exists -- e.g., it has been popped, or the stack was clobbered. */
6610 if (frame == NULL)
6611 {
6612 warning (_("Unable to restore previously selected frame."));
6613 return 0;
6614 }
6615
6616 select_frame (frame);
6617
6618 return (1);
6619 }
6620
6621 /* Restore the inferior session state from INF_STATUS, then free INF_STATUS. */
6622
6623 void
6624 restore_infcall_control_state (struct infcall_control_state *inf_status)
6625 {
6626 struct thread_info *tp = inferior_thread ();
6627 struct inferior *inf = current_inferior ();
6628
6629 if (tp->control.step_resume_breakpoint)
6630 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6631
6632 if (tp->control.exception_resume_breakpoint)
6633 tp->control.exception_resume_breakpoint->disposition
6634 = disp_del_at_next_stop;
6635
6636 /* Handle the bpstat_copy of the chain. */
6637 bpstat_clear (&tp->control.stop_bpstat);
6638
6639 tp->control = inf_status->thread_control;
6640 inf->control = inf_status->inferior_control;
6641
6642 /* Other fields: */
6643 stop_stack_dummy = inf_status->stop_stack_dummy;
6644 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6645 stop_after_trap = inf_status->stop_after_trap;
6646
6647 if (target_has_stack)
6648 {
6649 /* The point of catch_errors is that if the stack is clobbered,
6650 walking the stack might encounter a garbage pointer and
6651 error() trying to dereference it. */
6652 if (catch_errors
6653 (restore_selected_frame, &inf_status->selected_frame_id,
6654 "Unable to restore previously selected frame:\n",
6655 RETURN_MASK_ERROR) == 0)
6656 /* Error in restoring the selected frame. Select the innermost
6657 frame. */
6658 select_frame (get_current_frame ());
6659 }
6660
6661 xfree (inf_status);
6662 }
6663
6664 static void
6665 do_restore_infcall_control_state_cleanup (void *sts)
6666 {
6667 restore_infcall_control_state (sts);
6668 }
6669
6670 struct cleanup *
6671 make_cleanup_restore_infcall_control_state
6672 (struct infcall_control_state *inf_status)
6673 {
6674 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6675 }
6676
6677 void
6678 discard_infcall_control_state (struct infcall_control_state *inf_status)
6679 {
6680 if (inf_status->thread_control.step_resume_breakpoint)
6681 inf_status->thread_control.step_resume_breakpoint->disposition
6682 = disp_del_at_next_stop;
6683
6684 if (inf_status->thread_control.exception_resume_breakpoint)
6685 inf_status->thread_control.exception_resume_breakpoint->disposition
6686 = disp_del_at_next_stop;
6687
6688 /* See save_infcall_control_state for info on stop_bpstat. */
6689 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6690
6691 xfree (inf_status);
6692 }
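/* Illustrative sketch only: the matching usage pattern for the
   control-state functions above.  Which branch a real caller takes is
   an assumption; the point is that exactly one of restore or discard
   must run, so the copied bpstat chain and any step-resume breakpoints
   are released.

     struct infcall_control_state *inf_status
       = save_infcall_control_state ();
     struct cleanup *old_chain
       = make_cleanup_restore_infcall_control_state (inf_status);

     ...make the inferior function call...

     if (keep_current_control_state)    <- hypothetical condition
       {
         discard_cleanups (old_chain);
         discard_infcall_control_state (inf_status);
       }
     else
       do_cleanups (old_chain);         <- restores stepping state,
                                           bpstat and selected frame
*/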
6693 \f
6694 int
6695 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6696 {
6697 struct target_waitstatus last;
6698 ptid_t last_ptid;
6699
6700 get_last_target_status (&last_ptid, &last);
6701
6702 if (last.kind != TARGET_WAITKIND_FORKED)
6703 return 0;
6704
6705 if (!ptid_equal (last_ptid, pid))
6706 return 0;
6707
6708 *child_pid = last.value.related_pid;
6709 return 1;
6710 }
6711
6712 int
6713 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6714 {
6715 struct target_waitstatus last;
6716 ptid_t last_ptid;
6717
6718 get_last_target_status (&last_ptid, &last);
6719
6720 if (last.kind != TARGET_WAITKIND_VFORKED)
6721 return 0;
6722
6723 if (!ptid_equal (last_ptid, pid))
6724 return 0;
6725
6726 *child_pid = last.value.related_pid;
6727 return 1;
6728 }
6729
6730 int
6731 inferior_has_execd (ptid_t pid, char **execd_pathname)
6732 {
6733 struct target_waitstatus last;
6734 ptid_t last_ptid;
6735
6736 get_last_target_status (&last_ptid, &last);
6737
6738 if (last.kind != TARGET_WAITKIND_EXECD)
6739 return 0;
6740
6741 if (!ptid_equal (last_ptid, pid))
6742 return 0;
6743
6744 *execd_pathname = xstrdup (last.value.execd_pathname);
6745 return 1;
6746 }
6747
6748 int
6749 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6750 {
6751 struct target_waitstatus last;
6752 ptid_t last_ptid;
6753
6754 get_last_target_status (&last_ptid, &last);
6755
6756 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6757 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6758 return 0;
6759
6760 if (!ptid_equal (last_ptid, pid))
6761 return 0;
6762
6763 *syscall_number = last.value.syscall_number;
6764 return 1;
6765 }
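/* Illustrative sketch only: how the inferior_has_* queries above are
   meant to be used.  This caller is hypothetical; real users live
   elsewhere in GDB (e.g. the catchpoint code).

     ptid_t child;

     if (inferior_has_forked (inferior_ptid, &child))
       printf_filtered (_("last stop was a fork; child pid is %d\n"),
                        ptid_get_pid (child));
*/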
6766
6767 /* Oft-used ptids.  */
6768 ptid_t null_ptid;
6769 ptid_t minus_one_ptid;
6770
6771 /* Create a ptid given the necessary PID, LWP, and TID components. */
6772
6773 ptid_t
6774 ptid_build (int pid, long lwp, long tid)
6775 {
6776 ptid_t ptid;
6777
6778 ptid.pid = pid;
6779 ptid.lwp = lwp;
6780 ptid.tid = tid;
6781 return ptid;
6782 }
6783
6784 /* Create a ptid from just a pid. */
6785
6786 ptid_t
6787 pid_to_ptid (int pid)
6788 {
6789 return ptid_build (pid, 0, 0);
6790 }
6791
6792 /* Fetch the pid (process id) component from a ptid. */
6793
6794 int
6795 ptid_get_pid (ptid_t ptid)
6796 {
6797 return ptid.pid;
6798 }
6799
6800 /* Fetch the lwp (lightweight process) component from a ptid. */
6801
6802 long
6803 ptid_get_lwp (ptid_t ptid)
6804 {
6805 return ptid.lwp;
6806 }
6807
6808 /* Fetch the tid (thread id) component from a ptid. */
6809
6810 long
6811 ptid_get_tid (ptid_t ptid)
6812 {
6813 return ptid.tid;
6814 }
6815
6816 /* ptid_equal() is used to test equality of two ptids. */
6817
6818 int
6819 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6820 {
6821 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6822 && ptid1.tid == ptid2.tid);
6823 }
6824
6825 /* Returns true if PTID represents a process. */
6826
6827 int
6828 ptid_is_pid (ptid_t ptid)
6829 {
6830 if (ptid_equal (minus_one_ptid, ptid))
6831 return 0;
6832 if (ptid_equal (null_ptid, ptid))
6833 return 0;
6834
6835 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6836 }
6837
6838 int
6839 ptid_match (ptid_t ptid, ptid_t filter)
6840 {
6841 if (ptid_equal (filter, minus_one_ptid))
6842 return 1;
6843 if (ptid_is_pid (filter)
6844 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6845 return 1;
6846 else if (ptid_equal (ptid, filter))
6847 return 1;
6848
6849 return 0;
6850 }
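/* Worked examples of ptid_match (the numbers are made up):

     ptid_match (ptid_build (10, 1, 0), minus_one_ptid)         => 1
       minus_one_ptid is the "match anything" wildcard.
     ptid_match (ptid_build (10, 1, 0), pid_to_ptid (10))       => 1
       A bare-pid filter matches every thread of that process.
     ptid_match (ptid_build (10, 1, 0), ptid_build (10, 1, 0))  => 1
       Exact match.
     ptid_match (ptid_build (10, 1, 0), pid_to_ptid (11))       => 0
       Different process, and not an exact match.  */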
6851
6852 /* restore_inferior_ptid() will be used by the cleanup machinery
6853 to restore the inferior_ptid value saved in a call to
6854 save_inferior_ptid(). */
6855
6856 static void
6857 restore_inferior_ptid (void *arg)
6858 {
6859 ptid_t *saved_ptid_ptr = arg;
6860
6861 inferior_ptid = *saved_ptid_ptr;
6862 xfree (arg);
6863 }
6864
6865 /* Save the value of inferior_ptid so that it may be restored by a
6866 later call to do_cleanups(). Returns the struct cleanup pointer
6867 needed for later doing the cleanup. */
6868
6869 struct cleanup *
6870 save_inferior_ptid (void)
6871 {
6872 ptid_t *saved_ptid_ptr;
6873
6874 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6875 *saved_ptid_ptr = inferior_ptid;
6876 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6877 }
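/* Illustrative sketch only: the usual save/switch/restore pattern.
   OTHER_PTID stands in for whatever thread the caller needs to operate
   on temporarily.

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = other_ptid;
     ...do the work that requires the other thread selected...

     do_cleanups (old_chain);   <- puts the saved ptid back and frees
                                   the heap copy made above
*/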
6878 \f
6879
6880 /* User interface for reverse debugging:
6881 Set exec-direction / show exec-direction commands
6882 (setting the direction is an error unless the target can execute in reverse). */
6883
6884 int execution_direction = EXEC_FORWARD;
6885 static const char exec_forward[] = "forward";
6886 static const char exec_reverse[] = "reverse";
6887 static const char *exec_direction = exec_forward;
6888 static const char *exec_direction_names[] = {
6889 exec_forward,
6890 exec_reverse,
6891 NULL
6892 };
6893
6894 static void
6895 set_exec_direction_func (char *args, int from_tty,
6896 struct cmd_list_element *cmd)
6897 {
6898 if (target_can_execute_reverse)
6899 {
6900 if (!strcmp (exec_direction, exec_forward))
6901 execution_direction = EXEC_FORWARD;
6902 else if (!strcmp (exec_direction, exec_reverse))
6903 execution_direction = EXEC_REVERSE;
6904 }
6905 else
6906 {
6907 exec_direction = exec_forward;
6908 error (_("Target does not support this operation."));
6909 }
6910 }
6911
6912 static void
6913 show_exec_direction_func (struct ui_file *out, int from_tty,
6914 struct cmd_list_element *cmd, const char *value)
6915 {
6916 switch (execution_direction)
{
6917 case EXEC_FORWARD:
6918 fprintf_filtered (out, _("Forward.\n"));
6919 break;
6920 case EXEC_REVERSE:
6921 fprintf_filtered (out, _("Reverse.\n"));
6922 break;
6923 case EXEC_ERROR:
6924 default:
6925 fprintf_filtered (out, _("Forward (target `%s' does not "
6926 "support exec-direction).\n"),
6927 target_shortname);
6928 break;
6929 }
6930 }
6931
6932 /* User interface for non-stop mode. */
6933
6934 int non_stop = 0;
6935
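/* "set non-stop" is staged through non_stop_1: the set machinery writes
   the user's request there first, and set_non_stop below either commits
   it to non_stop or, if the inferior is already running, reverts
   non_stop_1 and raises an error so the displayed setting stays
   consistent.  */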
6936 static void
6937 set_non_stop (char *args, int from_tty,
6938 struct cmd_list_element *c)
6939 {
6940 if (target_has_execution)
6941 {
6942 non_stop_1 = non_stop;
6943 error (_("Cannot change this setting while the inferior is running."));
6944 }
6945
6946 non_stop = non_stop_1;
6947 }
6948
6949 static void
6950 show_non_stop (struct ui_file *file, int from_tty,
6951 struct cmd_list_element *c, const char *value)
6952 {
6953 fprintf_filtered (file,
6954 _("Controlling the inferior in non-stop mode is %s.\n"),
6955 value);
6956 }
6957
6958 static void
6959 show_schedule_multiple (struct ui_file *file, int from_tty,
6960 struct cmd_list_element *c, const char *value)
6961 {
6962 fprintf_filtered (file, _("Resuming the execution of threads "
6963 "of all processes is %s.\n"), value);
6964 }
6965
6966 void
6967 _initialize_infrun (void)
6968 {
6969 int i;
6970 int numsigs;
6971
6972 add_info ("signals", signals_info, _("\
6973 What debugger does when program gets various signals.\n\
6974 Specify a signal as argument to print info on that signal only."));
6975 add_info_alias ("handle", "signals", 0);
6976
6977 add_com ("handle", class_run, handle_command, _("\
6978 Specify how to handle a signal.\n\
6979 Args are signals and actions to apply to those signals.\n\
6980 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6981 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6982 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6983 The special arg \"all\" is recognized to mean all signals except those\n\
6984 used by the debugger, typically SIGTRAP and SIGINT.\n\
6985 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6986 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6987 Stop means reenter debugger if this signal happens (implies print).\n\
6988 Print means print a message if this signal happens.\n\
6989 Pass means let program see this signal; otherwise program doesn't know.\n\
6990 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6991 Pass and Stop may be combined."));
6992 if (xdb_commands)
6993 {
6994 add_com ("lz", class_info, signals_info, _("\
6995 What debugger does when program gets various signals.\n\
6996 Specify a signal as argument to print info on that signal only."));
6997 add_com ("z", class_run, xdb_handle_command, _("\
6998 Specify how to handle a signal.\n\
6999 Args are signals and actions to apply to those signals.\n\
7000 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7001 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7002 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7003 The special arg \"all\" is recognized to mean all signals except those\n\
7004 used by the debugger, typically SIGTRAP and SIGINT.\n\
7005 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7006 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7007 nopass), \"Q\" (noprint)\n\
7008 Stop means reenter debugger if this signal happens (implies print).\n\
7009 Print means print a message if this signal happens.\n\
7010 Pass means let program see this signal; otherwise program doesn't know.\n\
7011 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7012 Pass and Stop may be combined."));
7013 }
7014
7015 if (!dbx_commands)
7016 stop_command = add_cmd ("stop", class_obscure,
7017 not_just_help_class_command, _("\
7018 There is no `stop' command, but you can set a hook on `stop'.\n\
7019 This allows you to set a list of commands to be run each time execution\n\
7020 of the program stops."), &cmdlist);
7021
7022 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7023 Set inferior debugging."), _("\
7024 Show inferior debugging."), _("\
7025 When non-zero, inferior specific debugging is enabled."),
7026 NULL,
7027 show_debug_infrun,
7028 &setdebuglist, &showdebuglist);
7029
7030 add_setshow_boolean_cmd ("displaced", class_maintenance,
7031 &debug_displaced, _("\
7032 Set displaced stepping debugging."), _("\
7033 Show displaced stepping debugging."), _("\
7034 When non-zero, displaced stepping specific debugging is enabled."),
7035 NULL,
7036 show_debug_displaced,
7037 &setdebuglist, &showdebuglist);
7038
7039 add_setshow_boolean_cmd ("non-stop", no_class,
7040 &non_stop_1, _("\
7041 Set whether gdb controls the inferior in non-stop mode."), _("\
7042 Show whether gdb controls the inferior in non-stop mode."), _("\
7043 When debugging a multi-threaded program and this setting is\n\
7044 off (the default, also called all-stop mode), when one thread stops\n\
7045 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7046 all other threads in the program while you interact with the thread of\n\
7047 interest. When you continue or step a thread, you can allow the other\n\
7048 threads to run, or have them remain stopped, but while you inspect any\n\
7049 thread's state, all threads stop.\n\
7050 \n\
7051 In non-stop mode, when one thread stops, other threads can continue\n\
7052 to run freely. You'll be able to step each thread independently,\n\
7053 leave it stopped or free to run as needed."),
7054 set_non_stop,
7055 show_non_stop,
7056 &setlist,
7057 &showlist);
7058
7059 numsigs = (int) TARGET_SIGNAL_LAST;
7060 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7061 signal_print = (unsigned char *)
7062 xmalloc (sizeof (signal_print[0]) * numsigs);
7063 signal_program = (unsigned char *)
7064 xmalloc (sizeof (signal_program[0]) * numsigs);
7065 signal_pass = (unsigned char *)
7066 xmalloc (sizeof (signal_pass[0]) * numsigs);
7067 for (i = 0; i < numsigs; i++)
7068 {
7069 signal_stop[i] = 1;
7070 signal_print[i] = 1;
7071 signal_program[i] = 1;
7072 }
7073
7074 /* Signals caused by debugger's own actions
7075 should not be given to the program afterwards. */
7076 signal_program[TARGET_SIGNAL_TRAP] = 0;
7077 signal_program[TARGET_SIGNAL_INT] = 0;
7078
7079 /* Signals that are not errors should not normally enter the debugger. */
7080 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7081 signal_print[TARGET_SIGNAL_ALRM] = 0;
7082 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7083 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7084 signal_stop[TARGET_SIGNAL_PROF] = 0;
7085 signal_print[TARGET_SIGNAL_PROF] = 0;
7086 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7087 signal_print[TARGET_SIGNAL_CHLD] = 0;
7088 signal_stop[TARGET_SIGNAL_IO] = 0;
7089 signal_print[TARGET_SIGNAL_IO] = 0;
7090 signal_stop[TARGET_SIGNAL_POLL] = 0;
7091 signal_print[TARGET_SIGNAL_POLL] = 0;
7092 signal_stop[TARGET_SIGNAL_URG] = 0;
7093 signal_print[TARGET_SIGNAL_URG] = 0;
7094 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7095 signal_print[TARGET_SIGNAL_WINCH] = 0;
7096 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7097 signal_print[TARGET_SIGNAL_PRIO] = 0;
7098
7099 /* These signals are used internally by user-level thread
7100 implementations. (See signal(5) on Solaris.) Like the above
7101 signals, a healthy program receives and handles them as part of
7102 its normal operation. */
7103 signal_stop[TARGET_SIGNAL_LWP] = 0;
7104 signal_print[TARGET_SIGNAL_LWP] = 0;
7105 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7106 signal_print[TARGET_SIGNAL_WAITING] = 0;
7107 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7108 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7109
7110 /* Update cached state. */
7111 signal_cache_update (-1);
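/* Illustrative mapping (an assumption about handle_command, defined
   earlier in this file, not a transcript of its code): a command such
   as "handle SIGALRM stop print nopass" would leave the per-signal
   tables roughly like this:

     signal_stop[TARGET_SIGNAL_ALRM] = 1;      <- stop implies print
     signal_print[TARGET_SIGNAL_ALRM] = 1;
     signal_program[TARGET_SIGNAL_ALRM] = 0;   <- nopass

   signal_cache_update then presumably refreshes the cached signal_pass
   view that is handed to the target.  */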
7112
7113 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7114 &stop_on_solib_events, _("\
7115 Set stopping for shared library events."), _("\
7116 Show stopping for shared library events."), _("\
7117 If nonzero, gdb will give control to the user when the dynamic linker\n\
7118 notifies gdb of shared library events. The most common event of interest\n\
7119 to the user would be loading/unloading of a new library."),
7120 NULL,
7121 show_stop_on_solib_events,
7122 &setlist, &showlist);
7123
7124 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7125 follow_fork_mode_kind_names,
7126 &follow_fork_mode_string, _("\
7127 Set debugger response to a program call of fork or vfork."), _("\
7128 Show debugger response to a program call of fork or vfork."), _("\
7129 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7130 parent - the original process is debugged after a fork\n\
7131 child - the new process is debugged after a fork\n\
7132 The unfollowed process will continue to run.\n\
7133 By default, the debugger will follow the parent process."),
7134 NULL,
7135 show_follow_fork_mode_string,
7136 &setlist, &showlist);
7137
7138 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7139 follow_exec_mode_names,
7140 &follow_exec_mode_string, _("\
7141 Set debugger response to a program call of exec."), _("\
7142 Show debugger response to a program call of exec."), _("\
7143 An exec call replaces the program image of a process.\n\
7144 \n\
7145 follow-exec-mode can be:\n\
7146 \n\
7147 new - the debugger creates a new inferior and rebinds the process\n\
7148 to this new inferior. The program the process was running before\n\
7149 the exec call can be restarted afterwards by restarting the original\n\
7150 inferior.\n\
7151 \n\
7152 same - the debugger keeps the process bound to the same inferior.\n\
7153 The new executable image replaces the previous executable loaded in\n\
7154 the inferior. Restarting the inferior after the exec call restarts\n\
7155 the executable the process was running after the exec call.\n\
7156 \n\
7157 By default, the debugger will use the same inferior."),
7158 NULL,
7159 show_follow_exec_mode_string,
7160 &setlist, &showlist);
7161
7162 add_setshow_enum_cmd ("scheduler-locking", class_run,
7163 scheduler_enums, &scheduler_mode, _("\
7164 Set mode for locking scheduler during execution."), _("\
7165 Show mode for locking scheduler during execution."), _("\
7166 off == no locking (threads may preempt at any time)\n\
7167 on == full locking (no thread except the current thread may run)\n\
7168 step == scheduler locked during every single-step operation.\n\
7169 In this mode, no other thread may run during a step command.\n\
7170 Other threads may run while stepping over a function call ('next')."),
7171 set_schedlock_func, /* traps on target vector */
7172 show_scheduler_mode,
7173 &setlist, &showlist);
7174
7175 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7176 Set mode for resuming threads of all processes."), _("\
7177 Show mode for resuming threads of all processes."), _("\
7178 When on, execution commands (such as 'continue' or 'next') resume all\n\
7179 threads of all processes. When off (which is the default), execution\n\
7180 commands only resume the threads of the current process. The set of\n\
7181 threads that are resumed is further refined by the scheduler-locking\n\
7182 mode (see help set scheduler-locking)."),
7183 NULL,
7184 show_schedule_multiple,
7185 &setlist, &showlist);
7186
7187 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7188 Set mode of the step operation."), _("\
7189 Show mode of the step operation."), _("\
7190 When set, doing a step over a function without debug line information\n\
7191 will stop at the first instruction of that function. Otherwise, the\n\
7192 function is skipped and the step command stops at a different source line."),
7193 NULL,
7194 show_step_stop_if_no_debug,
7195 &setlist, &showlist);
7196
7197 add_setshow_enum_cmd ("displaced-stepping", class_run,
7198 can_use_displaced_stepping_enum,
7199 &can_use_displaced_stepping, _("\
7200 Set debugger's willingness to use displaced stepping."), _("\
7201 Show debugger's willingness to use displaced stepping."), _("\
7202 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7203 supported by the target architecture. If off, gdb will not use displaced\n\
7204 stepping to step over breakpoints, even if such is supported by the target\n\
7205 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7206 if the target architecture supports it and non-stop mode is active, but will not\n\
7207 use it in all-stop mode (see help set non-stop)."),
7208 NULL,
7209 show_can_use_displaced_stepping,
7210 &setlist, &showlist);
7211
7212 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7213 &exec_direction, _("Set direction of execution.\n\
7214 Options are 'forward' or 'reverse'."),
7215 _("Show direction of execution (forward/reverse)."),
7216 _("Tells gdb whether to execute forward or backward."),
7217 set_exec_direction_func, show_exec_direction_func,
7218 &setlist, &showlist);
7219
7220 /* Set/show detach-on-fork: user-settable mode. */
7221
7222 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7223 Set whether gdb will detach the child of a fork."), _("\
7224 Show whether gdb will detach the child of a fork."), _("\
7225 Tells gdb whether to detach the child of a fork."),
7226 NULL, NULL, &setlist, &showlist);
7227
7228 /* ptid initializations */
7229 null_ptid = ptid_build (0, 0, 0);
7230 minus_one_ptid = ptid_build (-1, 0, 0);
7231 inferior_ptid = null_ptid;
7232 target_last_wait_ptid = minus_one_ptid;
7233
7234 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7235 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7236 observer_attach_thread_exit (infrun_thread_thread_exit);
7237 observer_attach_inferior_exit (infrun_inferior_exit);
7238
7239 /* Explicitly create without lookup, since that tries to create a
7240 value with a void typed value, and when we get here, gdbarch
7241 isn't initialized yet. At this point, we're quite sure there
7242 isn't another convenience variable of the same name. */
7243 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7244
7245 add_setshow_boolean_cmd ("observer", no_class,
7246 &observer_mode_1, _("\
7247 Set whether gdb controls the inferior in observer mode."), _("\
7248 Show whether gdb controls the inferior in observer mode."), _("\
7249 In observer mode, GDB can get data from the inferior, but not\n\
7250 affect its execution. Registers and memory may not be changed,\n\
7251 breakpoints may not be set, and the program cannot be interrupted\n\
7252 or signalled."),
7253 set_observer_mode,
7254 show_observer_mode,
7255 &setlist,
7256 &showlist);
7257 }