Refactor native follow-fork.
gdb/infrun.c (deliverable/binutils-gdb.git)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "mi/mi-common.h"
49 #include "event-top.h"
50 #include "record.h"
51 #include "record-full.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55 #include "continuations.h"
56 #include "interps.h"
57 #include "skip.h"
58 #include "probe.h"
59 #include "objfiles.h"
60 #include "completer.h"
61 #include "target-descriptions.h"
62 #include "target-dcache.h"
63 #include "terminal.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static int follow_fork_inferior (int follow_child, int detach_fork);
84
85 static void follow_inferior_reset_breakpoints (void);
86
87 static void set_schedlock_func (char *args, int from_tty,
88 struct cmd_list_element *c);
89
90 static int currently_stepping (struct thread_info *tp);
91
92 static void xdb_handle_command (char *args, int from_tty);
93
94 void _initialize_infrun (void);
95
96 void nullify_last_target_wait_ptid (void);
97
98 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
99
100 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
101
102 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
103
104 /* When set, stop the 'step' command if we enter a function which has
105 no line number information. The normal behavior is that we step
106 over such a function. */
107 int step_stop_if_no_debug = 0;
108 static void
109 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
110 struct cmd_list_element *c, const char *value)
111 {
112 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
113 }
114
115 /* In asynchronous mode, but simulating synchronous execution. */
116
117 int sync_execution = 0;
118
119 /* proceed and normal_stop use this to notify the user when the
120 inferior stopped in a different thread than it had been running
121 in. */
122
123 static ptid_t previous_inferior_ptid;
124
125 /* If set (default for legacy reasons), when following a fork, GDB
126 will detach from one of the fork branches, child or parent.
127 Exactly which branch is detached depends on 'set follow-fork-mode'
128 setting. */
129
130 static int detach_fork = 1;
131
132 int debug_displaced = 0;
133 static void
134 show_debug_displaced (struct ui_file *file, int from_tty,
135 struct cmd_list_element *c, const char *value)
136 {
137 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
138 }
139
140 unsigned int debug_infrun = 0;
141 static void
142 show_debug_infrun (struct ui_file *file, int from_tty,
143 struct cmd_list_element *c, const char *value)
144 {
145 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
146 }
147
148
149 /* Support for disabling address space randomization. */
150
151 int disable_randomization = 1;
152
153 static void
154 show_disable_randomization (struct ui_file *file, int from_tty,
155 struct cmd_list_element *c, const char *value)
156 {
157 if (target_supports_disable_randomization ())
158 fprintf_filtered (file,
159 _("Disabling randomization of debuggee's "
160 "virtual address space is %s.\n"),
161 value);
162 else
163 fputs_filtered (_("Disabling randomization of debuggee's "
164 "virtual address space is unsupported on\n"
165 "this platform.\n"), file);
166 }
167
168 static void
169 set_disable_randomization (char *args, int from_tty,
170 struct cmd_list_element *c)
171 {
172 if (!target_supports_disable_randomization ())
173 error (_("Disabling randomization of debuggee's "
174 "virtual address space is unsupported on\n"
175 "this platform."));
176 }
177
178 /* User interface for non-stop mode. */
179
180 int non_stop = 0;
181 static int non_stop_1 = 0;
182
183 static void
184 set_non_stop (char *args, int from_tty,
185 struct cmd_list_element *c)
186 {
187 if (target_has_execution)
188 {
189 non_stop_1 = non_stop;
190 error (_("Cannot change this setting while the inferior is running."));
191 }
192
193 non_stop = non_stop_1;
194 }
195
196 static void
197 show_non_stop (struct ui_file *file, int from_tty,
198 struct cmd_list_element *c, const char *value)
199 {
200 fprintf_filtered (file,
201 _("Controlling the inferior in non-stop mode is %s.\n"),
202 value);
203 }
204
205 /* "Observer mode" is somewhat like a more extreme version of
206 non-stop, in which all GDB operations that might affect the
207 target's execution have been disabled. */
208
209 int observer_mode = 0;
210 static int observer_mode_1 = 0;
211
212 static void
213 set_observer_mode (char *args, int from_tty,
214 struct cmd_list_element *c)
215 {
216 if (target_has_execution)
217 {
218 observer_mode_1 = observer_mode;
219 error (_("Cannot change this setting while the inferior is running."));
220 }
221
222 observer_mode = observer_mode_1;
223
224 may_write_registers = !observer_mode;
225 may_write_memory = !observer_mode;
226 may_insert_breakpoints = !observer_mode;
227 may_insert_tracepoints = !observer_mode;
228 /* We can insert fast tracepoints in or out of observer mode,
229 but enable them if we're going into this mode. */
230 if (observer_mode)
231 may_insert_fast_tracepoints = 1;
232 may_stop = !observer_mode;
233 update_target_permissions ();
234
235 /* Going *into* observer mode we must force non-stop, then
236 going out we leave it that way. */
237 if (observer_mode)
238 {
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
248 static void
249 show_observer_mode (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
253 }
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that are registered with "catch signal". A
287 non-zero entry indicates that the signal is caught by some "catch
288 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
289 signals. */
290 static unsigned char *signal_catch;
291
292 /* Table of signals that the target may silently handle.
293 This is automatically determined from the flags above,
294 and simply cached here. */
295 static unsigned char *signal_pass;
296
297 #define SET_SIGS(nsigs,sigs,flags) \
298 do { \
299 int signum = (nsigs); \
300 while (signum-- > 0) \
301 if ((sigs)[signum]) \
302 (flags)[signum] = 1; \
303 } while (0)
304
305 #define UNSET_SIGS(nsigs,sigs,flags) \
306 do { \
307 int signum = (nsigs); \
308 while (signum-- > 0) \
309 if ((sigs)[signum]) \
310 (flags)[signum] = 0; \
311 } while (0)
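/* As an illustrative sketch (not quoted from the "handle" command
   implementation): given NSIGS and a SIGS array with one nonzero
   entry per signal the user named, these macros update the
   per-signal tables above, e.g.:

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);
     UNSET_SIGS (nsigs, sigs, signal_program);

   Every signal marked in SIGS gets its flag set (or cleared) in the
   given table; unmarked signals are left untouched.  */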
312
313 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
314 this function is to avoid exporting `signal_program'. */
315
316 void
317 update_signals_program_target (void)
318 {
319 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
320 }
321
322 /* Value to pass to target_resume() to cause all threads to resume. */
323
324 #define RESUME_ALL minus_one_ptid
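/* For example, a call such as the following (a sketch, not a quote of
   the actual resume path) asks the target to resume every thread,
   without stepping and with no signal delivered:

     target_resume (RESUME_ALL, 0, GDB_SIGNAL_0);  */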
325
326 /* Command list pointer for the "stop" placeholder. */
327
328 static struct cmd_list_element *stop_command;
329
330 /* Function inferior was in as of last step command. */
331
332 static struct symbol *step_start_function;
333
334 /* Nonzero if we want to give control to the user when we're notified
335 of shared library events by the dynamic linker. */
336 int stop_on_solib_events;
337
338 /* Enable or disable optional shared library event breakpoints
339 as appropriate when the above flag is changed. */
340
341 static void
342 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
343 {
344 update_solib_breakpoints ();
345 }
346
347 static void
348 show_stop_on_solib_events (struct ui_file *file, int from_tty,
349 struct cmd_list_element *c, const char *value)
350 {
351 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
352 value);
353 }
354
355 /* Nonzero means we are expecting a trace trap, and should stop the
356 inferior and return silently when it happens. */
357
358 int stop_after_trap;
359
360 /* Save register contents here when executing a "finish" command or when
361 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
362 Thus this contains the return value from the called function (assuming
363 values are returned in a register). */
364
365 struct regcache *stop_registers;
366
367 /* Nonzero after stop if current stack frame should be printed. */
368
369 static int stop_print_frame;
370
371 /* This is a cached copy of the pid/waitstatus of the last event
372 returned by target_wait()/deprecated_target_wait_hook(). This
373 information is returned by get_last_target_status(). */
374 static ptid_t target_last_wait_ptid;
375 static struct target_waitstatus target_last_waitstatus;
376
377 static void context_switch (ptid_t ptid);
378
379 void init_thread_stepping_state (struct thread_info *tss);
380
381 static void init_infwait_state (void);
382
383 static const char follow_fork_mode_child[] = "child";
384 static const char follow_fork_mode_parent[] = "parent";
385
386 static const char *const follow_fork_mode_kind_names[] = {
387 follow_fork_mode_child,
388 follow_fork_mode_parent,
389 NULL
390 };
391
392 static const char *follow_fork_mode_string = follow_fork_mode_parent;
393 static void
394 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
395 struct cmd_list_element *c, const char *value)
396 {
397 fprintf_filtered (file,
398 _("Debugger response to a program "
399 "call of fork or vfork is \"%s\".\n"),
400 value);
401 }
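/* How the two fork-related settings combine, summarizing the behavior
   implemented by follow_fork_inferior below (an informal recap, not a
   separate specification):

     follow-fork-mode parent, detach-on-fork on (the defaults)
       -> debug the parent, detach from the child.
     follow-fork-mode parent, detach-on-fork off
       -> debug the parent, keep the child as another inferior.
     follow-fork-mode child, detach-on-fork on
       -> debug the child, detach from the parent.
     follow-fork-mode child, detach-on-fork off
       -> debug the child, keep the parent as another inferior.

   For a vfork, the parent additionally stays blocked in the vfork
   syscall until the child execs or exits, since both share the same
   address space until then.  */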
402 \f
403
404 /* Handle changes to the inferior list based on the type of fork,
405 which process is being followed, and whether the other process
406 should be detached. On entry inferior_ptid must be the ptid of
407 the fork parent. At return inferior_ptid is the ptid of the
408 followed inferior. */
409
410 static int
411 follow_fork_inferior (int follow_child, int detach_fork)
412 {
413 int has_vforked;
414 int parent_pid, child_pid;
415
416 has_vforked = (inferior_thread ()->pending_follow.kind
417 == TARGET_WAITKIND_VFORKED);
418 parent_pid = ptid_get_lwp (inferior_ptid);
419 if (parent_pid == 0)
420 parent_pid = ptid_get_pid (inferior_ptid);
421 child_pid
422 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
423
424 if (has_vforked
425 && !non_stop /* Non-stop always resumes both branches. */
426 && (!target_is_async_p () || sync_execution)
427 && !(follow_child || detach_fork || sched_multi))
428 {
429 /* The parent stays blocked inside the vfork syscall until the
430 child execs or exits. If we don't let the child run, then
431 the parent stays blocked. If we're telling the parent to run
432 in the foreground, the user will not be able to ctrl-c to get
433 back the terminal, effectively hanging the debug session. */
434 fprintf_filtered (gdb_stderr, _("\
435 Can not resume the parent process over vfork in the foreground while\n\
436 holding the child stopped. Try \"set detach-on-fork\" or \
437 \"set schedule-multiple\".\n"));
438 /* FIXME output string > 80 columns. */
439 return 1;
440 }
441
442 if (!follow_child)
443 {
444 /* Detach new forked process? */
445 if (detach_fork)
446 {
447 struct cleanup *old_chain;
448
449 /* Before detaching from the child, remove all breakpoints
450 from it. If we forked, then this has already been taken
451 care of by infrun.c. If we vforked however, any
452 breakpoint inserted in the parent is visible in the
453 child, even those added while stopped in a vfork
454 catchpoint. This will remove the breakpoints from the
455 parent also, but they'll be reinserted below. */
456 if (has_vforked)
457 {
458 /* Keep breakpoints list in sync. */
459 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
460 }
461
462 if (info_verbose || debug_infrun)
463 {
464 target_terminal_ours ();
465 fprintf_filtered (gdb_stdlog,
466 "Detaching after fork from "
467 "child process %d.\n",
468 child_pid);
469 }
470 }
471 else
472 {
473 struct inferior *parent_inf, *child_inf;
474 struct cleanup *old_chain;
475
476 /* Add process to GDB's tables. */
477 child_inf = add_inferior (child_pid);
478
479 parent_inf = current_inferior ();
480 child_inf->attach_flag = parent_inf->attach_flag;
481 copy_terminal_info (child_inf, parent_inf);
482 child_inf->gdbarch = parent_inf->gdbarch;
483 copy_inferior_target_desc_info (child_inf, parent_inf);
484
485 old_chain = save_inferior_ptid ();
486 save_current_program_space ();
487
488 inferior_ptid = ptid_build (child_pid, child_pid, 0);
489 add_thread (inferior_ptid);
490 child_inf->symfile_flags = SYMFILE_NO_READ;
491
492 /* If this is a vfork child, then the address-space is
493 shared with the parent. */
494 if (has_vforked)
495 {
496 child_inf->pspace = parent_inf->pspace;
497 child_inf->aspace = parent_inf->aspace;
498
499 /* The parent will be frozen until the child is done
500 with the shared region. Keep track of the
501 parent. */
502 child_inf->vfork_parent = parent_inf;
503 child_inf->pending_detach = 0;
504 parent_inf->vfork_child = child_inf;
505 parent_inf->pending_detach = 0;
506 }
507 else
508 {
509 child_inf->aspace = new_address_space ();
510 child_inf->pspace = add_program_space (child_inf->aspace);
511 child_inf->removable = 1;
512 set_current_program_space (child_inf->pspace);
513 clone_program_space (child_inf->pspace, parent_inf->pspace);
514
515 /* Let the shared library layer (e.g., solib-svr4) learn
516 about this new process, relocate the cloned exec, pull
517 in shared libraries, and install the solib event
518 breakpoint. If a "cloned-VM" event was propagated
519 better throughout the core, this wouldn't be
520 required. */
521 solib_create_inferior_hook (0);
522 }
523
524 do_cleanups (old_chain);
525 }
526
527 if (has_vforked)
528 {
529 struct inferior *parent_inf;
530
531 parent_inf = current_inferior ();
532
533 /* If we detached from the child, then we have to be careful
534 to not insert breakpoints in the parent until the child
535 is done with the shared memory region. However, if we're
536 staying attached to the child, then we can and should
537 insert breakpoints, so that we can debug it. A
538 subsequent child exec or exit is enough to know when the
539 child stops using the parent's address space. */
540 parent_inf->waiting_for_vfork_done = detach_fork;
541 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
542 }
543 }
544 else
545 {
546 /* Follow the child. */
547 struct inferior *parent_inf, *child_inf;
548 struct program_space *parent_pspace;
549
550 if (info_verbose || debug_infrun)
551 {
552 target_terminal_ours ();
553 if (has_vforked)
554 fprintf_filtered (gdb_stdlog,
555 _("Attaching after process %d "
556 "vfork to child process %d.\n"),
557 parent_pid, child_pid);
558 else
559 fprintf_filtered (gdb_stdlog,
560 _("Attaching after process %d "
561 "fork to child process %d.\n"),
562 parent_pid, child_pid);
563 }
564
565 /* Add the new inferior first, so that the target_detach below
566 doesn't unpush the target. */
567
568 child_inf = add_inferior (child_pid);
569
570 parent_inf = current_inferior ();
571 child_inf->attach_flag = parent_inf->attach_flag;
572 copy_terminal_info (child_inf, parent_inf);
573 child_inf->gdbarch = parent_inf->gdbarch;
574 copy_inferior_target_desc_info (child_inf, parent_inf);
575
576 parent_pspace = parent_inf->pspace;
577
578 /* If we're vforking, we want to hold on to the parent until the
579 child exits or execs. At child exec or exit time we can
580 remove the old breakpoints from the parent and detach or
581 resume debugging it. Otherwise, detach the parent now; we'll
582 want to reuse its program/address spaces, but we can't set
583 them to the child before removing breakpoints from the
584 parent; otherwise, the breakpoints module could decide to
585 remove breakpoints from the wrong process (since they'd be
586 assigned to the same address space). */
587
588 if (has_vforked)
589 {
590 gdb_assert (child_inf->vfork_parent == NULL);
591 gdb_assert (parent_inf->vfork_child == NULL);
592 child_inf->vfork_parent = parent_inf;
593 child_inf->pending_detach = 0;
594 parent_inf->vfork_child = child_inf;
595 parent_inf->pending_detach = detach_fork;
596 parent_inf->waiting_for_vfork_done = 0;
597 }
598 else if (detach_fork)
599 target_detach (NULL, 0);
600
601 /* Note that the detach above makes PARENT_INF dangling. */
602
603 /* Add the child thread to the appropriate lists, and switch to
604 this new thread, before cloning the program space, and
605 informing the solib layer about this new process. */
606
607 inferior_ptid = ptid_build (child_pid, child_pid, 0);
608 add_thread (inferior_ptid);
609
610 /* If this is a vfork child, then the address-space is shared
611 with the parent. If we detached from the parent, then we can
612 reuse the parent's program/address spaces. */
613 if (has_vforked || detach_fork)
614 {
615 child_inf->pspace = parent_pspace;
616 child_inf->aspace = child_inf->pspace->aspace;
617 }
618 else
619 {
620 child_inf->aspace = new_address_space ();
621 child_inf->pspace = add_program_space (child_inf->aspace);
622 child_inf->removable = 1;
623 child_inf->symfile_flags = SYMFILE_NO_READ;
624 set_current_program_space (child_inf->pspace);
625 clone_program_space (child_inf->pspace, parent_pspace);
626
627 /* Let the shared library layer (e.g., solib-svr4) learn
628 about this new process, relocate the cloned exec, pull in
629 shared libraries, and install the solib event breakpoint.
630 If a "cloned-VM" event was propagated better throughout
631 the core, this wouldn't be required. */
632 solib_create_inferior_hook (0);
633 }
634 }
635
636 return target_follow_fork (follow_child, detach_fork);
637 }
638
639 /* Tell the target to follow the fork we're stopped at. Returns true
640 if the inferior should be resumed; false, if the target for some
641 reason decided it's best not to resume. */
642
643 static int
644 follow_fork (void)
645 {
646 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
647 int should_resume = 1;
648 struct thread_info *tp;
649
650 /* Copy user stepping state to the new inferior thread. FIXME: the
651 followed fork child thread should have a copy of most of the
652 parent thread structure's run control related fields, not just these.
653 Initialized to avoid "may be used uninitialized" warnings from gcc. */
654 struct breakpoint *step_resume_breakpoint = NULL;
655 struct breakpoint *exception_resume_breakpoint = NULL;
656 CORE_ADDR step_range_start = 0;
657 CORE_ADDR step_range_end = 0;
658 struct frame_id step_frame_id = { 0 };
659 struct interp *command_interp = NULL;
660
661 if (!non_stop)
662 {
663 ptid_t wait_ptid;
664 struct target_waitstatus wait_status;
665
666 /* Get the last target status returned by target_wait(). */
667 get_last_target_status (&wait_ptid, &wait_status);
668
669 /* If not stopped at a fork event, then there's nothing else to
670 do. */
671 if (wait_status.kind != TARGET_WAITKIND_FORKED
672 && wait_status.kind != TARGET_WAITKIND_VFORKED)
673 return 1;
674
675 /* Check if we switched over from WAIT_PTID, since the event was
676 reported. */
677 if (!ptid_equal (wait_ptid, minus_one_ptid)
678 && !ptid_equal (inferior_ptid, wait_ptid))
679 {
680 /* We did. Switch back to WAIT_PTID thread, to tell the
681 target to follow it (in either direction). We'll
682 afterwards refuse to resume, and inform the user what
683 happened. */
684 switch_to_thread (wait_ptid);
685 should_resume = 0;
686 }
687 }
688
689 tp = inferior_thread ();
690
691 /* If there were any forks/vforks that were caught and are now to be
692 followed, then do so now. */
693 switch (tp->pending_follow.kind)
694 {
695 case TARGET_WAITKIND_FORKED:
696 case TARGET_WAITKIND_VFORKED:
697 {
698 ptid_t parent, child;
699
700 /* If the user did a next/step, etc, over a fork call,
701 preserve the stepping state in the fork child. */
702 if (follow_child && should_resume)
703 {
704 step_resume_breakpoint = clone_momentary_breakpoint
705 (tp->control.step_resume_breakpoint);
706 step_range_start = tp->control.step_range_start;
707 step_range_end = tp->control.step_range_end;
708 step_frame_id = tp->control.step_frame_id;
709 exception_resume_breakpoint
710 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
711 command_interp = tp->control.command_interp;
712
713 /* For now, delete the parent's sr breakpoint, otherwise,
714 parent/child sr breakpoints are considered duplicates,
715 and the child version will not be installed. Remove
716 this when the breakpoints module becomes aware of
717 inferiors and address spaces. */
718 delete_step_resume_breakpoint (tp);
719 tp->control.step_range_start = 0;
720 tp->control.step_range_end = 0;
721 tp->control.step_frame_id = null_frame_id;
722 delete_exception_resume_breakpoint (tp);
723 tp->control.command_interp = NULL;
724 }
725
726 parent = inferior_ptid;
727 child = tp->pending_follow.value.related_pid;
728
729 /* Set up inferior(s) as specified by the caller, and tell the
730 target to do whatever is necessary to follow either parent
731 or child. */
732 if (follow_fork_inferior (follow_child, detach_fork))
733 {
734 /* Target refused to follow, or there's some other reason
735 we shouldn't resume. */
736 should_resume = 0;
737 }
738 else
739 {
740 /* This pending follow fork event is now handled, one way
741 or another. The previous selected thread may be gone
742 from the lists by now, but if it is still around, we need
743 to clear the pending follow request. */
744 tp = find_thread_ptid (parent);
745 if (tp)
746 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
747
748 /* This makes sure we don't try to apply the "Switched
749 over from WAIT_PID" logic above. */
750 nullify_last_target_wait_ptid ();
751
752 /* If we followed the child, switch to it... */
753 if (follow_child)
754 {
755 switch_to_thread (child);
756
757 /* ... and preserve the stepping state, in case the
758 user was stepping over the fork call. */
759 if (should_resume)
760 {
761 tp = inferior_thread ();
762 tp->control.step_resume_breakpoint
763 = step_resume_breakpoint;
764 tp->control.step_range_start = step_range_start;
765 tp->control.step_range_end = step_range_end;
766 tp->control.step_frame_id = step_frame_id;
767 tp->control.exception_resume_breakpoint
768 = exception_resume_breakpoint;
769 tp->control.command_interp = command_interp;
770 }
771 else
772 {
773 /* If we get here, it was because we're trying to
774 resume from a fork catchpoint, but, the user
775 has switched threads away from the thread that
776 forked. In that case, the resume command
777 issued is most likely not applicable to the
778 child, so just warn, and refuse to resume. */
779 warning (_("Not resuming: switched threads "
780 "before following fork child.\n"));
781 }
782
783 /* Reset breakpoints in the child as appropriate. */
784 follow_inferior_reset_breakpoints ();
785 }
786 else
787 switch_to_thread (parent);
788 }
789 }
790 break;
791 case TARGET_WAITKIND_SPURIOUS:
792 /* Nothing to follow. */
793 break;
794 default:
795 internal_error (__FILE__, __LINE__,
796 "Unexpected pending_follow.kind %d\n",
797 tp->pending_follow.kind);
798 break;
799 }
800
801 return should_resume;
802 }
803
804 static void
805 follow_inferior_reset_breakpoints (void)
806 {
807 struct thread_info *tp = inferior_thread ();
808
809 /* Was there a step_resume breakpoint? (There was if the user
810 did a "next" at the fork() call.) If so, explicitly reset its
811 thread number. Cloned step_resume breakpoints are disabled on
812 creation, so enable it here now that it is associated with the
813 correct thread.
814
815 step_resumes are a form of bp that are made to be per-thread.
816 Since we created the step_resume bp when the parent process
817 was being debugged, and now are switching to the child process,
818 from the breakpoint package's viewpoint, that's a switch of
819 "threads". We must update the bp's notion of which thread
820 it is for, or it'll be ignored when it triggers. */
821
822 if (tp->control.step_resume_breakpoint)
823 {
824 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
825 tp->control.step_resume_breakpoint->loc->enabled = 1;
826 }
827
828 /* Treat exception_resume breakpoints like step_resume breakpoints. */
829 if (tp->control.exception_resume_breakpoint)
830 {
831 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
832 tp->control.exception_resume_breakpoint->loc->enabled = 1;
833 }
834
835 /* Reinsert all breakpoints in the child. The user may have set
836 breakpoints after catching the fork, in which case those
837 were never set in the child, but only in the parent. This makes
838 sure the inserted breakpoints match the breakpoint list. */
839
840 breakpoint_re_set ();
841 insert_breakpoints ();
842 }
843
844 /* The child has exited or execed: resume threads of the parent the
845 user wanted to be executing. */
846
847 static int
848 proceed_after_vfork_done (struct thread_info *thread,
849 void *arg)
850 {
851 int pid = * (int *) arg;
852
853 if (ptid_get_pid (thread->ptid) == pid
854 && is_running (thread->ptid)
855 && !is_executing (thread->ptid)
856 && !thread->stop_requested
857 && thread->suspend.stop_signal == GDB_SIGNAL_0)
858 {
859 if (debug_infrun)
860 fprintf_unfiltered (gdb_stdlog,
861 "infrun: resuming vfork parent thread %s\n",
862 target_pid_to_str (thread->ptid));
863
864 switch_to_thread (thread->ptid);
865 clear_proceed_status (0);
866 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
867 }
868
869 return 0;
870 }
871
872 /* Called whenever we notice an exec or exit event, to handle
873 detaching or resuming a vfork parent. */
874
875 static void
876 handle_vfork_child_exec_or_exit (int exec)
877 {
878 struct inferior *inf = current_inferior ();
879
880 if (inf->vfork_parent)
881 {
882 int resume_parent = -1;
883
884 /* This exec or exit marks the end of the shared memory region
885 between the parent and the child. If the user wanted to
886 detach from the parent, now is the time. */
887
888 if (inf->vfork_parent->pending_detach)
889 {
890 struct thread_info *tp;
891 struct cleanup *old_chain;
892 struct program_space *pspace;
893 struct address_space *aspace;
894
895 /* follow-fork child, detach-on-fork on. */
896
897 inf->vfork_parent->pending_detach = 0;
898
899 if (!exec)
900 {
901 /* If we're handling a child exit, then inferior_ptid
902 points at the inferior's pid, not to a thread. */
903 old_chain = save_inferior_ptid ();
904 save_current_program_space ();
905 save_current_inferior ();
906 }
907 else
908 old_chain = save_current_space_and_thread ();
909
910 /* We're letting go of the parent. */
911 tp = any_live_thread_of_process (inf->vfork_parent->pid);
912 switch_to_thread (tp->ptid);
913
914 /* We're about to detach from the parent, which implicitly
915 removes breakpoints from its address space. There's a
916 catch here: we want to reuse the spaces for the child,
917 but, parent/child are still sharing the pspace at this
918 point, although the exec in reality makes the kernel give
919 the child a fresh set of new pages. The problem here is
920 that the breakpoints module, being unaware of this, would
921 likely choose the child process to write to the parent
922 address space. Swapping the child temporarily away from
923 the spaces has the desired effect. Yes, this is "sort
924 of" a hack. */
925
926 pspace = inf->pspace;
927 aspace = inf->aspace;
928 inf->aspace = NULL;
929 inf->pspace = NULL;
930
931 if (debug_infrun || info_verbose)
932 {
933 target_terminal_ours ();
934
935 if (exec)
936 fprintf_filtered (gdb_stdlog,
937 "Detaching vfork parent process "
938 "%d after child exec.\n",
939 inf->vfork_parent->pid);
940 else
941 fprintf_filtered (gdb_stdlog,
942 "Detaching vfork parent process "
943 "%d after child exit.\n",
944 inf->vfork_parent->pid);
945 }
946
947 target_detach (NULL, 0);
948
949 /* Put it back. */
950 inf->pspace = pspace;
951 inf->aspace = aspace;
952
953 do_cleanups (old_chain);
954 }
955 else if (exec)
956 {
957 /* We're staying attached to the parent, so, really give the
958 child a new address space. */
959 inf->pspace = add_program_space (maybe_new_address_space ());
960 inf->aspace = inf->pspace->aspace;
961 inf->removable = 1;
962 set_current_program_space (inf->pspace);
963
964 resume_parent = inf->vfork_parent->pid;
965
966 /* Break the bonds. */
967 inf->vfork_parent->vfork_child = NULL;
968 }
969 else
970 {
971 struct cleanup *old_chain;
972 struct program_space *pspace;
973
974 /* If this is a vfork child exiting, then the pspace and
975 aspaces were shared with the parent. Since we're
976 reporting the process exit, we'll be mourning all that is
977 found in the address space, and switching to null_ptid,
978 preparing to start a new inferior. But, since we don't
979 want to clobber the parent's address/program spaces, we
980 go ahead and create a new one for this exiting
981 inferior. */
982
983 /* Switch to null_ptid, so that clone_program_space doesn't want
984 to read the selected frame of a dead process. */
985 old_chain = save_inferior_ptid ();
986 inferior_ptid = null_ptid;
987
988 /* This inferior is dead, so avoid giving the breakpoints
989 module the option to write through to it (cloning a
990 program space resets breakpoints). */
991 inf->aspace = NULL;
992 inf->pspace = NULL;
993 pspace = add_program_space (maybe_new_address_space ());
994 set_current_program_space (pspace);
995 inf->removable = 1;
996 inf->symfile_flags = SYMFILE_NO_READ;
997 clone_program_space (pspace, inf->vfork_parent->pspace);
998 inf->pspace = pspace;
999 inf->aspace = pspace->aspace;
1000
1001 /* Put back inferior_ptid. We'll continue mourning this
1002 inferior. */
1003 do_cleanups (old_chain);
1004
1005 resume_parent = inf->vfork_parent->pid;
1006 /* Break the bonds. */
1007 inf->vfork_parent->vfork_child = NULL;
1008 }
1009
1010 inf->vfork_parent = NULL;
1011
1012 gdb_assert (current_program_space == inf->pspace);
1013
1014 if (non_stop && resume_parent != -1)
1015 {
1016 /* If the user wanted the parent to be running, let it go
1017 free now. */
1018 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1019
1020 if (debug_infrun)
1021 fprintf_unfiltered (gdb_stdlog,
1022 "infrun: resuming vfork parent process %d\n",
1023 resume_parent);
1024
1025 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1026
1027 do_cleanups (old_chain);
1028 }
1029 }
1030 }
1031
1032 /* Enum strings for "set|show follow-exec-mode". */
1033
1034 static const char follow_exec_mode_new[] = "new";
1035 static const char follow_exec_mode_same[] = "same";
1036 static const char *const follow_exec_mode_names[] =
1037 {
1038 follow_exec_mode_new,
1039 follow_exec_mode_same,
1040 NULL,
1041 };
1042
1043 static const char *follow_exec_mode_string = follow_exec_mode_same;
1044 static void
1045 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1046 struct cmd_list_element *c, const char *value)
1047 {
1048 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1049 }
1050
1051 /* EXECD_PATHNAME is assumed to be non-NULL. */
1052
1053 static void
1054 follow_exec (ptid_t pid, char *execd_pathname)
1055 {
1056 struct thread_info *th = inferior_thread ();
1057 struct inferior *inf = current_inferior ();
1058
1059 /* This is an exec event that we actually wish to pay attention to.
1060 Refresh our symbol table to the newly exec'd program, remove any
1061 momentary bp's, etc.
1062
1063 If there are breakpoints, they aren't really inserted now,
1064 since the exec() transformed our inferior into a fresh set
1065 of instructions.
1066
1067 We want to preserve symbolic breakpoints on the list, since
1068 we have hopes that they can be reset after the new a.out's
1069 symbol table is read.
1070
1071 However, any "raw" breakpoints must be removed from the list
1072 (e.g., the solib bp's), since their address is probably invalid
1073 now.
1074
1075 And, we DON'T want to call delete_breakpoints() here, since
1076 that may write the bp's "shadow contents" (the instruction
1077 value that was overwritten with a TRAP instruction). Since
1078 we now have a new a.out, those shadow contents aren't valid. */
1079
1080 mark_breakpoints_out ();
1081
1082 update_breakpoints_after_exec ();
1083
1084 /* If there was one, it's gone now. We cannot truly step-to-next
1085 statement through an exec(). */
1086 th->control.step_resume_breakpoint = NULL;
1087 th->control.exception_resume_breakpoint = NULL;
1088 th->control.step_range_start = 0;
1089 th->control.step_range_end = 0;
1090
1091 /* The target reports the exec event to the main thread, even if
1092 some other thread does the exec, and even if the main thread was
1093 already stopped --- if debugging in non-stop mode, it's possible
1094 the user had the main thread held stopped in the previous image
1095 --- release it now. This is the same behavior as step-over-exec
1096 with scheduler-locking on in all-stop mode. */
1097 th->stop_requested = 0;
1098
1099 /* What is this a.out's name? */
1100 printf_unfiltered (_("%s is executing new program: %s\n"),
1101 target_pid_to_str (inferior_ptid),
1102 execd_pathname);
1103
1104 /* We've followed the inferior through an exec. Therefore, the
1105 inferior has essentially been killed & reborn. */
1106
1107 gdb_flush (gdb_stdout);
1108
1109 breakpoint_init_inferior (inf_execd);
1110
1111 if (gdb_sysroot && *gdb_sysroot)
1112 {
1113 char *name = alloca (strlen (gdb_sysroot)
1114 + strlen (execd_pathname)
1115 + 1);
1116
1117 strcpy (name, gdb_sysroot);
1118 strcat (name, execd_pathname);
1119 execd_pathname = name;
1120 }
1121
1122 /* Reset the shared library package. This ensures that we get a
1123 shlib event when the child reaches "_start", at which point the
1124 dld will have had a chance to initialize the child. */
1125 /* Also, loading a symbol file below may trigger symbol lookups, and
1126 we don't want those to be satisfied by the libraries of the
1127 previous incarnation of this process. */
1128 no_shared_libraries (NULL, 0);
1129
1130 if (follow_exec_mode_string == follow_exec_mode_new)
1131 {
1132 struct program_space *pspace;
1133
1134 /* The user wants to keep the old inferior and program spaces
1135 around. Create a new fresh one, and switch to it. */
1136
1137 inf = add_inferior (current_inferior ()->pid);
1138 pspace = add_program_space (maybe_new_address_space ());
1139 inf->pspace = pspace;
1140 inf->aspace = pspace->aspace;
1141
1142 exit_inferior_num_silent (current_inferior ()->num);
1143
1144 set_current_inferior (inf);
1145 set_current_program_space (pspace);
1146 }
1147 else
1148 {
1149 /* The old description may no longer be fit for the new image.
1150 E.g., a 64-bit process exec'ed a 32-bit process. Clear the
1151 old description; we'll read a new one below. No need to do
1152 this on "follow-exec-mode new", as the old inferior stays
1153 around (its description is later cleared/refetched on
1154 restart). */
1155 target_clear_description ();
1156 }
1157
1158 gdb_assert (current_program_space == inf->pspace);
1159
1160 /* That a.out is now the one to use. */
1161 exec_file_attach (execd_pathname, 0);
1162
1163 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for a
1164 PIE (Position Independent Executable) main symbol file will only get applied
1165 by solib_create_inferior_hook below. breakpoint_re_set would otherwise fail
1166 to insert the breakpoints with the zero displacement. */
1167
1168 symbol_file_add (execd_pathname,
1169 (inf->symfile_flags
1170 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1171 NULL, 0);
1172
1173 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1174 set_initial_language ();
1175
1176 /* If the target can specify a description, read it. Must do this
1177 after flipping to the new executable (because the target supplied
1178 description must be compatible with the executable's
1179 architecture, and the old executable may e.g., be 32-bit, while
1180 the new one 64-bit), and before anything involving memory or
1181 registers. */
1182 target_find_description ();
1183
1184 solib_create_inferior_hook (0);
1185
1186 jit_inferior_created_hook ();
1187
1188 breakpoint_re_set ();
1189
1190 /* Reinsert all breakpoints. (Those which were symbolic have
1191 been reset to the proper address in the new a.out, thanks
1192 to symbol_file_command...). */
1193 insert_breakpoints ();
1194
1195 /* The next resume of this inferior should bring it to the shlib
1196 startup breakpoints. (If the user had also set bp's on
1197 "main" from the old (parent) process, then they'll auto-
1198 matically get reset there in the new process.). */
1199 }
1200
1201 /* Non-zero if we are just simulating a single-step. This is needed
1202 because we cannot remove the breakpoints in the inferior process
1203 until after the `wait' in `wait_for_inferior'. */
1204 static int singlestep_breakpoints_inserted_p = 0;
1205
1206 /* The thread we inserted single-step breakpoints for. */
1207 static ptid_t singlestep_ptid;
1208
1209 /* PC when we started this single-step. */
1210 static CORE_ADDR singlestep_pc;
1211
1212 /* Info about an instruction that is being stepped over. Invalid if
1213 ASPACE is NULL. */
1214
1215 struct step_over_info
1216 {
1217 /* The instruction's address space. */
1218 struct address_space *aspace;
1219
1220 /* The instruction's address. */
1221 CORE_ADDR address;
1222 };
1223
1224 /* The step-over info of the location that is being stepped over.
1225
1226 Note that with async/breakpoint always-inserted mode, a user might
1227 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1228 being stepped over. As setting a new breakpoint inserts all
1229 breakpoints, we need to make sure the breakpoint being stepped over
1230 isn't inserted then. We do that by only clearing the step-over
1231 info when the step-over is actually finished (or aborted).
1232
1233 Presently GDB can only step over one breakpoint at any given time.
1234 Given that threads that can't run code in the same address space as the
1235 breakpoint's can't really miss the breakpoint, GDB could be taught
1236 to step-over at most one breakpoint per address space (so this info
1237 could move to the address space object if/when GDB is extended).
1238 The set of breakpoints being stepped over will normally be much
1239 smaller than the set of all breakpoints, so a flag in the
1240 breakpoint location structure would be wasteful. A separate list
1241 also saves complexity and run-time, as otherwise we'd have to go
1242 through all breakpoint locations clearing their flag whenever we
1243 start a new sequence. Similar considerations weigh against storing
1244 this info in the thread object. Plus, not all step overs actually
1245 have breakpoint locations -- e.g., stepping past a single-step
1246 breakpoint, or stepping to complete a non-continuable
1247 watchpoint. */
1248 static struct step_over_info step_over_info;
1249
1250 /* Record the address of the breakpoint/instruction we're currently
1251 stepping over. */
1252
1253 static void
1254 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1255 {
1256 step_over_info.aspace = aspace;
1257 step_over_info.address = address;
1258 }
1259
1260 /* Called when we're no longer stepping over a breakpoint / an
1261 instruction, so all breakpoints are free to be (re)inserted. */
1262
1263 static void
1264 clear_step_over_info (void)
1265 {
1266 step_over_info.aspace = NULL;
1267 step_over_info.address = 0;
1268 }
1269
1270 /* See infrun.h. */
1271
1272 int
1273 stepping_past_instruction_at (struct address_space *aspace,
1274 CORE_ADDR address)
1275 {
1276 return (step_over_info.aspace != NULL
1277 && breakpoint_address_match (aspace, address,
1278 step_over_info.aspace,
1279 step_over_info.address));
1280 }
1281
1282 \f
1283 /* Displaced stepping. */
1284
1285 /* In non-stop debugging mode, we must take special care to manage
1286 breakpoints properly; in particular, the traditional strategy for
1287 stepping a thread past a breakpoint it has hit is unsuitable.
1288 'Displaced stepping' is a tactic for stepping one thread past a
1289 breakpoint it has hit while ensuring that other threads running
1290 concurrently will hit the breakpoint as they should.
1291
1292 The traditional way to step a thread T off a breakpoint in a
1293 multi-threaded program in all-stop mode is as follows:
1294
1295 a0) Initially, all threads are stopped, and breakpoints are not
1296 inserted.
1297 a1) We single-step T, leaving breakpoints uninserted.
1298 a2) We insert breakpoints, and resume all threads.
1299
1300 In non-stop debugging, however, this strategy is unsuitable: we
1301 don't want to have to stop all threads in the system in order to
1302 continue or step T past a breakpoint. Instead, we use displaced
1303 stepping:
1304
1305 n0) Initially, T is stopped, other threads are running, and
1306 breakpoints are inserted.
1307 n1) We copy the instruction "under" the breakpoint to a separate
1308 location, outside the main code stream, making any adjustments
1309 to the instruction, register, and memory state as directed by
1310 T's architecture.
1311 n2) We single-step T over the instruction at its new location.
1312 n3) We adjust the resulting register and memory state as directed
1313 by T's architecture. This includes resetting T's PC to point
1314 back into the main instruction stream.
1315 n4) We resume T.
1316
1317 This approach depends on the following gdbarch methods:
1318
1319 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1320 indicate where to copy the instruction, and how much space must
1321 be reserved there. We use these in step n1.
1322
1323 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1324 address, and makes any necessary adjustments to the instruction,
1325 register contents, and memory. We use this in step n1.
1326
1327 - gdbarch_displaced_step_fixup adjusts registers and memory after
1328 we have successfully single-stepped the instruction, to yield the
1329 same effect the instruction would have had if we had executed it
1330 at its original address. We use this in step n3.
1331
1332 - gdbarch_displaced_step_free_closure provides cleanup.
1333
1334 The gdbarch_displaced_step_copy_insn and
1335 gdbarch_displaced_step_fixup functions must be written so that
1336 copying an instruction with gdbarch_displaced_step_copy_insn,
1337 single-stepping across the copied instruction, and then applying
1338 gdbarch_displaced_step_fixup should have the same effects on the
1339 thread's memory and registers as stepping the instruction in place
1340 would have. Exactly which responsibilities fall to the copy and
1341 which fall to the fixup is up to the author of those functions.
1342
1343 See the comments in gdbarch.sh for details.
1344
1345 Note that displaced stepping and software single-step cannot
1346 currently be used in combination, although with some care I think
1347 they could be made to. Software single-step works by placing
1348 breakpoints on all possible subsequent instructions; if the
1349 displaced instruction is a PC-relative jump, those breakpoints
1350 could fall in very strange places --- on pages that aren't
1351 executable, or at addresses that are not proper instruction
1352 boundaries. (We do generally let other threads run while we wait
1353 to hit the software single-step breakpoint, and they might
1354 encounter such a corrupted instruction.) One way to work around
1355 this would be to have gdbarch_displaced_step_copy_insn fully
1356 simulate the effect of PC-relative instructions (and return NULL)
1357 on architectures that use software single-stepping.
1358
1359 In non-stop mode, we can have independent and simultaneous step
1360 requests, so more than one thread may need to simultaneously step
1361 over a breakpoint. The current implementation assumes there is
1362 only one scratch space per process. In this case, we have to
1363 serialize access to the scratch space. If thread A wants to step
1364 over a breakpoint, but we are currently waiting for some other
1365 thread to complete a displaced step, we leave thread A stopped and
1366 place it in the displaced_step_request_queue. Whenever a displaced
1367 step finishes, we pick the next thread in the queue and start a new
1368 displaced step operation on it. See displaced_step_prepare and
1369 displaced_step_fixup for details. */
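/* Expressed with the gdbarch hooks above, one displaced-step cycle
   looks roughly like the sketch below (error handling and the request
   queue elided; displaced_step_prepare and displaced_step_fixup later
   in this file implement the real sequence):

     CORE_ADDR original = regcache_read_pc (regcache);
     CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
     ULONGEST len = gdbarch_max_insn_length (gdbarch);

     // n1: save the scratch area and copy the instruction there.
     target_read_memory (copy, saved_bytes, len);
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);
     regcache_write_pc (regcache, copy);

     // n2: single-step the thread at COPY.

     // n3/n4: restore the scratch area, fix up state, resume.
     write_memory (copy, saved_bytes, len);
     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   regcache);
     gdbarch_displaced_step_free_closure (gdbarch, closure);

   Here REGCACHE, SAVED_BYTES and CLOSURE stand for suitably declared
   locals.  */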
1370
1371 struct displaced_step_request
1372 {
1373 ptid_t ptid;
1374 struct displaced_step_request *next;
1375 };
1376
1377 /* Per-inferior displaced stepping state. */
1378 struct displaced_step_inferior_state
1379 {
1380 /* Pointer to next in linked list. */
1381 struct displaced_step_inferior_state *next;
1382
1383 /* The process this displaced step state refers to. */
1384 int pid;
1385
1386 /* A queue of pending displaced stepping requests. One entry per
1387 thread that needs to do a displaced step. */
1388 struct displaced_step_request *step_request_queue;
1389
1390 /* If this is not null_ptid, this is the thread carrying out a
1391 displaced single-step in process PID. This thread's state will
1392 require fixing up once it has completed its step. */
1393 ptid_t step_ptid;
1394
1395 /* The architecture the thread had when we stepped it. */
1396 struct gdbarch *step_gdbarch;
1397
1398 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1399 for post-step cleanup. */
1400 struct displaced_step_closure *step_closure;
1401
1402 /* The address of the original instruction, and the copy we
1403 made. */
1404 CORE_ADDR step_original, step_copy;
1405
1406 /* Saved contents of copy area. */
1407 gdb_byte *step_saved_copy;
1408 };
1409
1410 /* The list of states of processes currently involved in displaced
1411 stepping. */
1412 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1413
1414 /* Get the displaced stepping state of process PID. */
1415
1416 static struct displaced_step_inferior_state *
1417 get_displaced_stepping_state (int pid)
1418 {
1419 struct displaced_step_inferior_state *state;
1420
1421 for (state = displaced_step_inferior_states;
1422 state != NULL;
1423 state = state->next)
1424 if (state->pid == pid)
1425 return state;
1426
1427 return NULL;
1428 }
1429
1430 /* Add a new displaced stepping state for process PID to the displaced
1431 stepping state list, or return a pointer to an already existing
1432 entry, if it already exists. Never returns NULL. */
1433
1434 static struct displaced_step_inferior_state *
1435 add_displaced_stepping_state (int pid)
1436 {
1437 struct displaced_step_inferior_state *state;
1438
1439 for (state = displaced_step_inferior_states;
1440 state != NULL;
1441 state = state->next)
1442 if (state->pid == pid)
1443 return state;
1444
1445 state = xcalloc (1, sizeof (*state));
1446 state->pid = pid;
1447 state->next = displaced_step_inferior_states;
1448 displaced_step_inferior_states = state;
1449
1450 return state;
1451 }
1452
1453 /* If the inferior is displaced stepping, and ADDR equals the starting address
1454 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1455 return NULL. */
1456
1457 struct displaced_step_closure*
1458 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1459 {
1460 struct displaced_step_inferior_state *displaced
1461 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1462
1463 /* If checking the mode of displaced instruction in copy area. */
1464 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1465 && (displaced->step_copy == addr))
1466 return displaced->step_closure;
1467
1468 return NULL;
1469 }
1470
1471 /* Remove the displaced stepping state of process PID. */
1472
1473 static void
1474 remove_displaced_stepping_state (int pid)
1475 {
1476 struct displaced_step_inferior_state *it, **prev_next_p;
1477
1478 gdb_assert (pid != 0);
1479
1480 it = displaced_step_inferior_states;
1481 prev_next_p = &displaced_step_inferior_states;
1482 while (it)
1483 {
1484 if (it->pid == pid)
1485 {
1486 *prev_next_p = it->next;
1487 xfree (it);
1488 return;
1489 }
1490
1491 prev_next_p = &it->next;
1492 it = *prev_next_p;
1493 }
1494 }
1495
1496 static void
1497 infrun_inferior_exit (struct inferior *inf)
1498 {
1499 remove_displaced_stepping_state (inf->pid);
1500 }
1501
1502 /* If ON, and the architecture supports it, GDB will use displaced
1503 stepping to step over breakpoints. If OFF, or if the architecture
1504 doesn't support it, GDB will instead use the traditional
1505 hold-and-step approach. If AUTO (which is the default), GDB will
1506 decide which technique to use to step over breakpoints depending on
1507 which of all-stop or non-stop mode is active --- displaced stepping
1508 in non-stop mode; hold-and-step in all-stop mode. */
1509
1510 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1511
1512 static void
1513 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1514 struct cmd_list_element *c,
1515 const char *value)
1516 {
1517 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1518 fprintf_filtered (file,
1519 _("Debugger's willingness to use displaced stepping "
1520 "to step over breakpoints is %s (currently %s).\n"),
1521 value, non_stop ? "on" : "off");
1522 else
1523 fprintf_filtered (file,
1524 _("Debugger's willingness to use displaced stepping "
1525 "to step over breakpoints is %s.\n"), value);
1526 }
1527
1528 /* Return non-zero if displaced stepping can/should be used to step
1529 over breakpoints. */
1530
1531 static int
1532 use_displaced_stepping (struct gdbarch *gdbarch)
1533 {
1534 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1535 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1536 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1537 && find_record_target () == NULL);
1538 }
1539
1540 /* Clean out any stray displaced stepping state. */
1541 static void
1542 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1543 {
1544 /* Indicate that there is no cleanup pending. */
1545 displaced->step_ptid = null_ptid;
1546
1547 if (displaced->step_closure)
1548 {
1549 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1550 displaced->step_closure);
1551 displaced->step_closure = NULL;
1552 }
1553 }
1554
1555 static void
1556 displaced_step_clear_cleanup (void *arg)
1557 {
1558 struct displaced_step_inferior_state *state = arg;
1559
1560 displaced_step_clear (state);
1561 }
1562
1563 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1564 void
1565 displaced_step_dump_bytes (struct ui_file *file,
1566 const gdb_byte *buf,
1567 size_t len)
1568 {
1569 int i;
1570
1571 for (i = 0; i < len; i++)
1572 fprintf_unfiltered (file, "%02x ", buf[i]);
1573 fputs_unfiltered ("\n", file);
1574 }
1575
1576 /* Prepare to single-step, using displaced stepping.
1577
1578 Note that we cannot use displaced stepping when we have a signal to
1579 deliver. If we have a signal to deliver and an instruction to step
1580 over, then after the step, there will be no indication from the
1581 target whether the thread entered a signal handler or ignored the
1582 signal and stepped over the instruction successfully --- both cases
1583 result in a simple SIGTRAP. In the first case we mustn't do a
1584 fixup, and in the second case we must --- but we can't tell which.
1585 Comments in the code for 'random signals' in handle_inferior_event
1586 explain how we handle this case instead.
1587
1588 Returns 1 if preparing was successful -- this thread is going to be
1589 stepped now; or 0 if displaced stepping this thread got queued. */
1590 static int
1591 displaced_step_prepare (ptid_t ptid)
1592 {
1593 struct cleanup *old_cleanups, *ignore_cleanups;
1594 struct thread_info *tp = find_thread_ptid (ptid);
1595 struct regcache *regcache = get_thread_regcache (ptid);
1596 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1597 CORE_ADDR original, copy;
1598 ULONGEST len;
1599 struct displaced_step_closure *closure;
1600 struct displaced_step_inferior_state *displaced;
1601 int status;
1602
1603 /* We should never reach this function if the architecture does not
1604 support displaced stepping. */
1605 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1606
1607 /* Disable range stepping while executing in the scratch pad. We
1608 want a single-step even if executing the displaced instruction in
1609 the scratch buffer lands within the stepping range (e.g., a
1610 jump/branch). */
1611 tp->control.may_range_step = 0;
1612
1613 /* We have to displaced step one thread at a time, as we only have
1614 access to a single scratch space per inferior. */
1615
1616 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1617
1618 if (!ptid_equal (displaced->step_ptid, null_ptid))
1619 {
1620 /* Already waiting for a displaced step to finish. Defer this
1621 request and place in queue. */
1622 struct displaced_step_request *req, *new_req;
1623
1624 if (debug_displaced)
1625 fprintf_unfiltered (gdb_stdlog,
1626 "displaced: defering step of %s\n",
1627 target_pid_to_str (ptid));
1628
1629 new_req = xmalloc (sizeof (*new_req));
1630 new_req->ptid = ptid;
1631 new_req->next = NULL;
1632
1633 if (displaced->step_request_queue)
1634 {
1635 for (req = displaced->step_request_queue;
1636 req && req->next;
1637 req = req->next)
1638 ;
1639 req->next = new_req;
1640 }
1641 else
1642 displaced->step_request_queue = new_req;
1643
1644 return 0;
1645 }
1646 else
1647 {
1648 if (debug_displaced)
1649 fprintf_unfiltered (gdb_stdlog,
1650 "displaced: stepping %s now\n",
1651 target_pid_to_str (ptid));
1652 }
1653
1654 displaced_step_clear (displaced);
1655
1656 old_cleanups = save_inferior_ptid ();
1657 inferior_ptid = ptid;
1658
1659 original = regcache_read_pc (regcache);
1660
1661 copy = gdbarch_displaced_step_location (gdbarch);
1662 len = gdbarch_max_insn_length (gdbarch);
1663
1664 /* Save the original contents of the copy area. */
1665 displaced->step_saved_copy = xmalloc (len);
1666 ignore_cleanups = make_cleanup (free_current_contents,
1667 &displaced->step_saved_copy);
1668 status = target_read_memory (copy, displaced->step_saved_copy, len);
1669 if (status != 0)
1670 throw_error (MEMORY_ERROR,
1671 _("Error accessing memory address %s (%s) for "
1672 "displaced-stepping scratch space."),
1673 paddress (gdbarch, copy), safe_strerror (status));
1674 if (debug_displaced)
1675 {
1676 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1677 paddress (gdbarch, copy));
1678 displaced_step_dump_bytes (gdb_stdlog,
1679 displaced->step_saved_copy,
1680 len);
1681 }
1682
1683 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1684 original, copy, regcache);
1685
1686 /* We don't support the fully-simulated case at present. */
1687 gdb_assert (closure);
1688
1689 /* Save the information we need to fix things up if the step
1690 succeeds. */
1691 displaced->step_ptid = ptid;
1692 displaced->step_gdbarch = gdbarch;
1693 displaced->step_closure = closure;
1694 displaced->step_original = original;
1695 displaced->step_copy = copy;
1696
1697 make_cleanup (displaced_step_clear_cleanup, displaced);
1698
1699 /* Resume execution at the copy. */
1700 regcache_write_pc (regcache, copy);
1701
1702 discard_cleanups (ignore_cleanups);
1703
1704 do_cleanups (old_cleanups);
1705
1706 if (debug_displaced)
1707 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1708 paddress (gdbarch, copy));
1709
1710 return 1;
1711 }
1712
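/* Write LEN bytes from MYADDR to inferior memory at MEMADDR, temporarily
   switching inferior_ptid to PTID for the duration of the write.  */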
1713 static void
1714 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1715 const gdb_byte *myaddr, int len)
1716 {
1717 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1718
1719 inferior_ptid = ptid;
1720 write_memory (memaddr, myaddr, len);
1721 do_cleanups (ptid_cleanup);
1722 }
1723
1724 /* Restore the contents of the copy area for thread PTID. */
1725
1726 static void
1727 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1728 ptid_t ptid)
1729 {
1730 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1731
1732 write_memory_ptid (ptid, displaced->step_copy,
1733 displaced->step_saved_copy, len);
1734 if (debug_displaced)
1735 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1736 target_pid_to_str (ptid),
1737 paddress (displaced->step_gdbarch,
1738 displaced->step_copy));
1739 }
1740
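/* Fix up the state of thread EVENT_PTID after a displaced step, if that
   thread was the one being displaced stepped.  Restores the scratch pad
   contents, applies the architecture's fixup (or simply relocates the PC
   if the instruction did not complete because of SIGNAL), and then starts
   the next queued displaced-stepping request, if any.  */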
1741 static void
1742 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1743 {
1744 struct cleanup *old_cleanups;
1745 struct displaced_step_inferior_state *displaced
1746 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1747
1748 /* Was any thread of this process doing a displaced step? */
1749 if (displaced == NULL)
1750 return;
1751
1752 /* Was this event for the pid we displaced? */
1753 if (ptid_equal (displaced->step_ptid, null_ptid)
1754 || ! ptid_equal (displaced->step_ptid, event_ptid))
1755 return;
1756
1757 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1758
1759 displaced_step_restore (displaced, displaced->step_ptid);
1760
1761 /* Did the instruction complete successfully? */
1762 if (signal == GDB_SIGNAL_TRAP)
1763 {
1764 /* Fix up the resulting state. */
1765 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1766 displaced->step_closure,
1767 displaced->step_original,
1768 displaced->step_copy,
1769 get_thread_regcache (displaced->step_ptid));
1770 }
1771 else
1772 {
1773 /* Since the instruction didn't complete, all we can do is
1774 relocate the PC. */
1775 struct regcache *regcache = get_thread_regcache (event_ptid);
1776 CORE_ADDR pc = regcache_read_pc (regcache);
1777
1778 pc = displaced->step_original + (pc - displaced->step_copy);
1779 regcache_write_pc (regcache, pc);
1780 }
1781
1782 do_cleanups (old_cleanups);
1783
1784 displaced->step_ptid = null_ptid;
1785
1786 /* Are there any pending displaced stepping requests? If so, run
1787 one now. Leave the state object around, since we're likely to
1788 need it again soon. */
1789 while (displaced->step_request_queue)
1790 {
1791 struct displaced_step_request *head;
1792 ptid_t ptid;
1793 struct regcache *regcache;
1794 struct gdbarch *gdbarch;
1795 CORE_ADDR actual_pc;
1796 struct address_space *aspace;
1797
1798 head = displaced->step_request_queue;
1799 ptid = head->ptid;
1800 displaced->step_request_queue = head->next;
1801 xfree (head);
1802
1803 context_switch (ptid);
1804
1805 regcache = get_thread_regcache (ptid);
1806 actual_pc = regcache_read_pc (regcache);
1807 aspace = get_regcache_aspace (regcache);
1808
1809 if (breakpoint_here_p (aspace, actual_pc))
1810 {
1811 if (debug_displaced)
1812 fprintf_unfiltered (gdb_stdlog,
1813 "displaced: stepping queued %s now\n",
1814 target_pid_to_str (ptid));
1815
1816 displaced_step_prepare (ptid);
1817
1818 gdbarch = get_regcache_arch (regcache);
1819
1820 if (debug_displaced)
1821 {
1822 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1823 gdb_byte buf[4];
1824
1825 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1826 paddress (gdbarch, actual_pc));
1827 read_memory (actual_pc, buf, sizeof (buf));
1828 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1829 }
1830
1831 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1832 displaced->step_closure))
1833 target_resume (ptid, 1, GDB_SIGNAL_0);
1834 else
1835 target_resume (ptid, 0, GDB_SIGNAL_0);
1836
1837 /* Done, we're stepping a thread. */
1838 break;
1839 }
1840 else
1841 {
1842 int step;
1843 struct thread_info *tp = inferior_thread ();
1844
1845 /* The breakpoint we were sitting under has since been
1846 removed. */
1847 tp->control.trap_expected = 0;
1848
1849 /* Go back to what we were trying to do. */
1850 step = currently_stepping (tp);
1851
1852 if (debug_displaced)
1853 fprintf_unfiltered (gdb_stdlog,
1854 "displaced: breakpoint is gone: %s, step(%d)\n",
1855 target_pid_to_str (tp->ptid), step);
1856
1857 target_resume (ptid, step, GDB_SIGNAL_0);
1858 tp->suspend.stop_signal = GDB_SIGNAL_0;
1859
1860 /* This request was discarded. See if there's any other
1861 thread waiting for its turn. */
1862 }
1863 }
1864 }
1865
1866 /* Update global variables holding ptids to hold NEW_PTID if they were
1867 holding OLD_PTID. */
1868 static void
1869 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1870 {
1871 struct displaced_step_request *it;
1872 struct displaced_step_inferior_state *displaced;
1873
1874 if (ptid_equal (inferior_ptid, old_ptid))
1875 inferior_ptid = new_ptid;
1876
1877 if (ptid_equal (singlestep_ptid, old_ptid))
1878 singlestep_ptid = new_ptid;
1879
1880 for (displaced = displaced_step_inferior_states;
1881 displaced;
1882 displaced = displaced->next)
1883 {
1884 if (ptid_equal (displaced->step_ptid, old_ptid))
1885 displaced->step_ptid = new_ptid;
1886
1887 for (it = displaced->step_request_queue; it; it = it->next)
1888 if (ptid_equal (it->ptid, old_ptid))
1889 it->ptid = new_ptid;
1890 }
1891 }
1892
1893 \f
1894 /* Resuming. */
1895
1896 /* Things to clean up if we QUIT out of resume (). */
1897 static void
1898 resume_cleanups (void *ignore)
1899 {
1900 normal_stop ();
1901 }
1902
1903 static const char schedlock_off[] = "off";
1904 static const char schedlock_on[] = "on";
1905 static const char schedlock_step[] = "step";
1906 static const char *const scheduler_enums[] = {
1907 schedlock_off,
1908 schedlock_on,
1909 schedlock_step,
1910 NULL
1911 };
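/* Current "set scheduler-locking" mode; one of the strings above.  */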
1912 static const char *scheduler_mode = schedlock_off;
1913 static void
1914 show_scheduler_mode (struct ui_file *file, int from_tty,
1915 struct cmd_list_element *c, const char *value)
1916 {
1917 fprintf_filtered (file,
1918 _("Mode for locking scheduler "
1919 "during execution is \"%s\".\n"),
1920 value);
1921 }
1922
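/* The "set scheduler-locking" command handler.  Revert to "off" and
   error out if the current target cannot lock the scheduler.  */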
1923 static void
1924 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1925 {
1926 if (!target_can_lock_scheduler)
1927 {
1928 scheduler_mode = schedlock_off;
1929 error (_("Target '%s' cannot support this command."), target_shortname);
1930 }
1931 }
1932
1933 /* True if execution commands resume all threads of all processes by
1934 default; otherwise, resume only threads of the current inferior
1935 process. */
1936 int sched_multi = 0;
1937
1938 /* Try to setup for software single stepping over the specified location.
1939 Return 1 if target_resume() should use hardware single step.
1940
1941 GDBARCH the current gdbarch.
1942 PC the location to step over. */
1943
1944 static int
1945 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1946 {
1947 int hw_step = 1;
1948
1949 if (execution_direction == EXEC_FORWARD
1950 && gdbarch_software_single_step_p (gdbarch)
1951 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1952 {
1953 hw_step = 0;
1954 /* Do not pull these breakpoints until after a `wait' in
1955 `wait_for_inferior'. */
1956 singlestep_breakpoints_inserted_p = 1;
1957 singlestep_ptid = inferior_ptid;
1958 singlestep_pc = pc;
1959 }
1960 return hw_step;
1961 }
1962
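/* Return the ptid representing the set of threads that, from the
   user/frontend's point of view, an execution command resumes.  STEP
   indicates whether the command is a step, for deciding whether
   "set scheduler-locking step" applies.  */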
1963 ptid_t
1964 user_visible_resume_ptid (int step)
1965 {
1966 /* By default, resume all threads of all processes. */
1967 ptid_t resume_ptid = RESUME_ALL;
1968
1969 /* Maybe resume only all threads of the current process. */
1970 if (!sched_multi && target_supports_multi_process ())
1971 {
1972 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1973 }
1974
1975 /* Maybe resume a single thread after all. */
1976 if (non_stop)
1977 {
1978 /* With non-stop mode on, threads are always handled
1979 individually. */
1980 resume_ptid = inferior_ptid;
1981 }
1982 else if ((scheduler_mode == schedlock_on)
1983 || (scheduler_mode == schedlock_step && step))
1984 {
1985 /* User-settable 'scheduler' mode requires solo thread resume. */
1986 resume_ptid = inferior_ptid;
1987 }
1988
1989 /* We may actually resume fewer threads at first, e.g., if a thread
1990 is stopped at a breakpoint that needs stepping-off, but that
1991 should not be visible to the user/frontend, and neither should
1992 the frontend/user be allowed to proceed any of the threads that
1993 happen to be stopped for internal run control handling, if a
1994 previous command wanted them resumed. */
1995 return resume_ptid;
1996 }
1997
1998 /* Resume the inferior, but allow a QUIT. This is useful if the user
1999 wants to interrupt some lengthy single-stepping operation
2000 (for child processes, the SIGINT goes to the inferior, and so
2001 we get a SIGINT random_signal, but for remote debugging and perhaps
2002 other targets, that's not true).
2003
2004 STEP nonzero if we should step (zero to continue instead).
2005 SIG is the signal to give the inferior (zero for none). */
2006 void
2007 resume (int step, enum gdb_signal sig)
2008 {
2009 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2010 struct regcache *regcache = get_current_regcache ();
2011 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2012 struct thread_info *tp = inferior_thread ();
2013 CORE_ADDR pc = regcache_read_pc (regcache);
2014 struct address_space *aspace = get_regcache_aspace (regcache);
2015 ptid_t resume_ptid;
2016 /* From here on, this represents the caller's step vs continue
2017 request, while STEP represents what we'll actually request the
2018 target to do. STEP can decay from a step to a continue, if e.g.,
2019 we need to implement single-stepping with breakpoints (software
2020 single-step). When deciding whether "set scheduler-locking step"
2021 applies, it's the caller's intention that counts. */
2022 const int entry_step = step;
2023
2024 QUIT;
2025
2026 if (current_inferior ()->waiting_for_vfork_done)
2027 {
2028 /* Don't try to single-step a vfork parent that is waiting for
2029 the child to get out of the shared memory region (by exec'ing
2030 or exiting). This is particularly important on software
2031 single-step archs, as the child process would trip on the
2032 software single step breakpoint inserted for the parent
2033 process. Since the parent will not actually execute any
2034 instruction until the child is out of the shared region (such
2035 are vfork's semantics), it is safe to simply continue it.
2036 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2037 the parent, and tell it to `keep_going', which automatically
2038 sets it stepping again. */
2039 if (debug_infrun)
2040 fprintf_unfiltered (gdb_stdlog,
2041 "infrun: resume : clear step\n");
2042 step = 0;
2043 }
2044
2045 if (debug_infrun)
2046 fprintf_unfiltered (gdb_stdlog,
2047 "infrun: resume (step=%d, signal=%s), "
2048 "trap_expected=%d, current thread [%s] at %s\n",
2049 step, gdb_signal_to_symbol_string (sig),
2050 tp->control.trap_expected,
2051 target_pid_to_str (inferior_ptid),
2052 paddress (gdbarch, pc));
2053
2054 /* Normally, by the time we reach `resume', the breakpoints are either
2055 removed or inserted, as appropriate. The exception is if we're sitting
2056 at a permanent breakpoint; we need to step over it, but permanent
2057 breakpoints can't be removed. So we have to test for it here. */
2058 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2059 {
2060 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
2061 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2062 else
2063 error (_("\
2064 The program is stopped at a permanent breakpoint, but GDB does not know\n\
2065 how to step past a permanent breakpoint on this architecture. Try using\n\
2066 a command like `return' or `jump' to continue execution."));
2067 }
2068
2069 /* If we have a breakpoint to step over, make sure to do a single
2070 step only. Same if we have software watchpoints. */
2071 if (tp->control.trap_expected || bpstat_should_step ())
2072 tp->control.may_range_step = 0;
2073
2074 /* If enabled, step over breakpoints by executing a copy of the
2075 instruction at a different address.
2076
2077 We can't use displaced stepping when we have a signal to deliver;
2078 the comments for displaced_step_prepare explain why. The
2079 comments in the handle_inferior event for dealing with 'random
2080 signals' explain what we do instead.
2081
2082 We can't use displaced stepping when we are waiting for a vfork_done
2083 event; displaced stepping would break the vfork child in the same way
2084 a software single-step breakpoint would. */
2085 if (use_displaced_stepping (gdbarch)
2086 && (tp->control.trap_expected
2087 || (step && gdbarch_software_single_step_p (gdbarch)))
2088 && sig == GDB_SIGNAL_0
2089 && !current_inferior ()->waiting_for_vfork_done)
2090 {
2091 struct displaced_step_inferior_state *displaced;
2092
2093 if (!displaced_step_prepare (inferior_ptid))
2094 {
2095 /* Got placed in displaced stepping queue. Will be resumed
2096 later when all the currently queued displaced stepping
2097 requests finish. The thread is not executing at this
2098 point, and the call to set_executing will be made later.
2099 But we need to call set_running here, since from the
2100 user/frontend's point of view, threads were set running.
2101 Unless we're calling an inferior function, as in that
2102 case we pretend the inferior doesn't run at all. */
2103 if (!tp->control.in_infcall)
2104 set_running (user_visible_resume_ptid (entry_step), 1);
2105 discard_cleanups (old_cleanups);
2106 return;
2107 }
2108
2109 /* Update pc to reflect the new address from which we will execute
2110 instructions due to displaced stepping. */
2111 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2112
2113 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2114 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2115 displaced->step_closure);
2116 }
2117
2118 /* Do we need to do it the hard way, w/temp breakpoints? */
2119 else if (step)
2120 step = maybe_software_singlestep (gdbarch, pc);
2121
2122 /* Currently, our software single-step implementation leads to different
2123 results than hardware single-stepping in one situation: when stepping
2124 into delivering a signal which has an associated signal handler,
2125 hardware single-step will stop at the first instruction of the handler,
2126 while software single-step will simply skip execution of the handler.
2127
2128 For now, this difference in behavior is accepted since there is no
2129 easy way to actually implement single-stepping into a signal handler
2130 without kernel support.
2131
2132 However, there is one scenario where this difference leads to follow-on
2133 problems: if we're stepping off a breakpoint by removing all breakpoints
2134 and then single-stepping. In this case, the software single-step
2135 behavior means that even if there is a *breakpoint* in the signal
2136 handler, GDB still would not stop.
2137
2138 Fortunately, we can at least fix this particular issue. We detect
2139 here the case where we are about to deliver a signal while software
2140 single-stepping with breakpoints removed. In this situation, we
2141 revert the decisions to remove all breakpoints and insert single-
2142 step breakpoints, and instead we install a step-resume breakpoint
2143 at the current address, deliver the signal without stepping, and
2144 once we arrive back at the step-resume breakpoint, actually step
2145 over the breakpoint we originally wanted to step over. */
2146 if (singlestep_breakpoints_inserted_p
2147 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
2148 {
2149 /* If we have nested signals or a pending signal is delivered
2150 immediately after a handler returns, we might already have
2151 a step-resume breakpoint set on the earlier handler. We cannot
2152 set another step-resume breakpoint; just continue on until the
2153 original breakpoint is hit. */
2154 if (tp->control.step_resume_breakpoint == NULL)
2155 {
2156 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2157 tp->step_after_step_resume_breakpoint = 1;
2158 }
2159
2160 remove_single_step_breakpoints ();
2161 singlestep_breakpoints_inserted_p = 0;
2162
2163 clear_step_over_info ();
2164 tp->control.trap_expected = 0;
2165
2166 insert_breakpoints ();
2167 }
2168
2169 /* If STEP is set, it's a request to use hardware stepping
2170 facilities. But in that case, we should never
2171 use a singlestep breakpoint. */
2172 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
2173
2174 /* Decide the set of threads to ask the target to resume. Start
2175 by assuming everything will be resumed, then narrow the set
2176 by applying increasingly restrictive conditions. */
2177 resume_ptid = user_visible_resume_ptid (entry_step);
2178
2179 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
2180 (e.g., we might need to step over a breakpoint), from the
2181 user/frontend's point of view, all threads in RESUME_PTID are now
2182 running. Unless we're calling an inferior function, as in that
2183 case we pretend the inferior doesn't run at all. */
2184 if (!tp->control.in_infcall)
2185 set_running (resume_ptid, 1);
2186
2187 /* Maybe resume a single thread after all. */
2188 if ((step || singlestep_breakpoints_inserted_p)
2189 && tp->control.trap_expected)
2190 {
2191 /* We're allowing a thread to run past a breakpoint it has
2192 hit, by single-stepping the thread with the breakpoint
2193 removed. In which case, we need to single-step only this
2194 thread, and keep others stopped, as they can miss this
2195 breakpoint if allowed to run. */
2196 resume_ptid = inferior_ptid;
2197 }
2198
2199 if (gdbarch_cannot_step_breakpoint (gdbarch))
2200 {
2201 /* Most targets can step a breakpoint instruction, thus
2202 executing it normally. But if this one cannot, just
2203 continue and we will hit it anyway. */
2204 if (step && breakpoint_inserted_here_p (aspace, pc))
2205 step = 0;
2206 }
2207
2208 if (debug_displaced
2209 && use_displaced_stepping (gdbarch)
2210 && tp->control.trap_expected)
2211 {
2212 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
2213 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2214 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2215 gdb_byte buf[4];
2216
2217 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2218 paddress (resume_gdbarch, actual_pc));
2219 read_memory (actual_pc, buf, sizeof (buf));
2220 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2221 }
2222
2223 if (tp->control.may_range_step)
2224 {
2225 /* If we're resuming a thread with the PC out of the step
2226 range, then we're doing some nested/finer run control
2227 operation, like stepping the thread out of the dynamic
2228 linker or the displaced stepping scratch pad. We
2229 shouldn't have allowed a range step then. */
2230 gdb_assert (pc_in_thread_step_range (pc, tp));
2231 }
2232
2233 /* Install inferior's terminal modes. */
2234 target_terminal_inferior ();
2235
2236 /* Avoid confusing the next resume, if the next stop/resume
2237 happens to apply to another thread. */
2238 tp->suspend.stop_signal = GDB_SIGNAL_0;
2239
2240 /* Advise target which signals may be handled silently. If we have
2241 removed breakpoints because we are stepping over one (which can
2242 happen only if we are not using displaced stepping), we need to
2243 receive all signals to avoid accidentally skipping a breakpoint
2244 during execution of a signal handler. */
2245 if ((step || singlestep_breakpoints_inserted_p)
2246 && tp->control.trap_expected
2247 && !use_displaced_stepping (gdbarch))
2248 target_pass_signals (0, NULL);
2249 else
2250 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2251
2252 target_resume (resume_ptid, step, sig);
2253
2254 discard_cleanups (old_cleanups);
2255 }
2256 \f
2257 /* Proceeding. */
2258
2259 /* Clear out all variables saying what to do when inferior is continued.
2260 First do this, then set the ones you want, then call `proceed'. */
2261
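/* Clear the run control state of a single thread TP.  */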
2262 static void
2263 clear_proceed_status_thread (struct thread_info *tp)
2264 {
2265 if (debug_infrun)
2266 fprintf_unfiltered (gdb_stdlog,
2267 "infrun: clear_proceed_status_thread (%s)\n",
2268 target_pid_to_str (tp->ptid));
2269
2270 /* If this signal should not be seen by the program, give it zero.
2271 Used for debugging signals. */
2272 if (!signal_pass_state (tp->suspend.stop_signal))
2273 tp->suspend.stop_signal = GDB_SIGNAL_0;
2274
2275 tp->control.trap_expected = 0;
2276 tp->control.step_range_start = 0;
2277 tp->control.step_range_end = 0;
2278 tp->control.may_range_step = 0;
2279 tp->control.step_frame_id = null_frame_id;
2280 tp->control.step_stack_frame_id = null_frame_id;
2281 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2282 tp->stop_requested = 0;
2283
2284 tp->control.stop_step = 0;
2285
2286 tp->control.proceed_to_finish = 0;
2287
2288 tp->control.command_interp = NULL;
2289
2290 /* Discard any remaining commands or status from previous stop. */
2291 bpstat_clear (&tp->control.stop_bpstat);
2292 }
2293
2294 void
2295 clear_proceed_status (int step)
2296 {
2297 if (!non_stop)
2298 {
2299 struct thread_info *tp;
2300 ptid_t resume_ptid;
2301
2302 resume_ptid = user_visible_resume_ptid (step);
2303
2304 /* In all-stop mode, delete the per-thread status of all threads
2305 we're about to resume, implicitly and explicitly. */
2306 ALL_NON_EXITED_THREADS (tp)
2307 {
2308 if (!ptid_match (tp->ptid, resume_ptid))
2309 continue;
2310 clear_proceed_status_thread (tp);
2311 }
2312 }
2313
2314 if (!ptid_equal (inferior_ptid, null_ptid))
2315 {
2316 struct inferior *inferior;
2317
2318 if (non_stop)
2319 {
2320 /* If in non-stop mode, only delete the per-thread status of
2321 the current thread. */
2322 clear_proceed_status_thread (inferior_thread ());
2323 }
2324
2325 inferior = current_inferior ();
2326 inferior->control.stop_soon = NO_STOP_QUIETLY;
2327 }
2328
2329 stop_after_trap = 0;
2330
2331 clear_step_over_info ();
2332
2333 observer_notify_about_to_proceed ();
2334
2335 if (stop_registers)
2336 {
2337 regcache_xfree (stop_registers);
2338 stop_registers = NULL;
2339 }
2340 }
2341
2342 /* Returns true if TP is still stopped at a breakpoint that needs
2343 stepping-over in order to make progress. If the breakpoint is gone
2344 meanwhile, we can skip the whole step-over dance. */
2345
2346 static int
2347 thread_still_needs_step_over (struct thread_info *tp)
2348 {
2349 if (tp->stepping_over_breakpoint)
2350 {
2351 struct regcache *regcache = get_thread_regcache (tp->ptid);
2352
2353 if (breakpoint_here_p (get_regcache_aspace (regcache),
2354 regcache_read_pc (regcache)))
2355 return 1;
2356
2357 tp->stepping_over_breakpoint = 0;
2358 }
2359
2360 return 0;
2361 }
2362
2363 /* Returns true if scheduler locking applies. STEP indicates whether
2364 we're about to do a step/next-like command to a thread. */
2365
2366 static int
2367 schedlock_applies (int step)
2368 {
2369 return (scheduler_mode == schedlock_on
2370 || (scheduler_mode == schedlock_step
2371 && step));
2372 }
2373
2374 /* Look for a thread other than EXCEPT that has previously reported a
2375 breakpoint event, and thus needs a step-over in order to make
2376 progress. Returns NULL if none is found. STEP indicates whether
2377 we're about to step the current thread, in order to decide whether
2378 "set scheduler-locking step" applies. */
2379
2380 static struct thread_info *
2381 find_thread_needs_step_over (int step, struct thread_info *except)
2382 {
2383 struct thread_info *tp, *current;
2384
2385 /* With non-stop mode on, threads are always handled individually. */
2386 gdb_assert (! non_stop);
2387
2388 current = inferior_thread ();
2389
2390 /* If scheduler locking applies, we can avoid iterating over all
2391 threads. */
2392 if (schedlock_applies (step))
2393 {
2394 if (except != current
2395 && thread_still_needs_step_over (current))
2396 return current;
2397
2398 return NULL;
2399 }
2400
2401 ALL_NON_EXITED_THREADS (tp)
2402 {
2403 /* Ignore the EXCEPT thread. */
2404 if (tp == except)
2405 continue;
2406 /* Ignore threads of processes we're not resuming. */
2407 if (!sched_multi
2408 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2409 continue;
2410
2411 if (thread_still_needs_step_over (tp))
2412 return tp;
2413 }
2414
2415 return NULL;
2416 }
2417
2418 /* Basic routine for continuing the program in various fashions.
2419
2420 ADDR is the address to resume at, or -1 for resume where stopped.
2421 SIGGNAL is the signal to give it, or 0 for none,
2422 or -1 for act according to how it stopped.
2423 STEP is nonzero if should trap after one instruction.
2424 -1 means return after that and print nothing.
2425 You should probably set various step_... variables
2426 before calling here, if you are stepping.
2427
2428 You should call clear_proceed_status before calling proceed. */
2429
2430 void
2431 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2432 {
2433 struct regcache *regcache;
2434 struct gdbarch *gdbarch;
2435 struct thread_info *tp;
2436 CORE_ADDR pc;
2437 struct address_space *aspace;
2438
2439 /* If we're stopped at a fork/vfork, follow the branch set by the
2440 "set follow-fork-mode" command; otherwise, we'll just proceed
2441 resuming the current thread. */
2442 if (!follow_fork ())
2443 {
2444 /* The target for some reason decided not to resume. */
2445 normal_stop ();
2446 if (target_can_async_p ())
2447 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2448 return;
2449 }
2450
2451 /* We'll update this if & when we switch to a new thread. */
2452 previous_inferior_ptid = inferior_ptid;
2453
2454 regcache = get_current_regcache ();
2455 gdbarch = get_regcache_arch (regcache);
2456 aspace = get_regcache_aspace (regcache);
2457 pc = regcache_read_pc (regcache);
2458 tp = inferior_thread ();
2459
2460 if (step > 0)
2461 step_start_function = find_pc_function (pc);
2462 if (step < 0)
2463 stop_after_trap = 1;
2464
2465 /* Fill in with reasonable starting values. */
2466 init_thread_stepping_state (tp);
2467
2468 if (addr == (CORE_ADDR) -1)
2469 {
2470 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2471 && execution_direction != EXEC_REVERSE)
2472 /* There is a breakpoint at the address we will resume at,
2473 step one instruction before inserting breakpoints so that
2474 we do not stop right away (and report a second hit at this
2475 breakpoint).
2476
2477 Note, we don't do this in reverse, because we won't
2478 actually be executing the breakpoint insn anyway.
2479 We'll be (un-)executing the previous instruction. */
2480 tp->stepping_over_breakpoint = 1;
2481 else if (gdbarch_single_step_through_delay_p (gdbarch)
2482 && gdbarch_single_step_through_delay (gdbarch,
2483 get_current_frame ()))
2484 /* We stepped onto an instruction that needs to be stepped
2485 again before re-inserting the breakpoint, do so. */
2486 tp->stepping_over_breakpoint = 1;
2487 }
2488 else
2489 {
2490 regcache_write_pc (regcache, addr);
2491 }
2492
2493 if (siggnal != GDB_SIGNAL_DEFAULT)
2494 tp->suspend.stop_signal = siggnal;
2495
2496 /* Record the interpreter that issued the execution command that
2497 caused this thread to resume. If the top level interpreter is
2498 MI/async, and the execution command was a CLI command
2499 (next/step/etc.), we'll want to print stop event output to the MI
2500 console channel (the stepped-to line, etc.), as if the user
2501 entered the execution command on a real GDB console. */
2502 inferior_thread ()->control.command_interp = command_interp ();
2503
2504 if (debug_infrun)
2505 fprintf_unfiltered (gdb_stdlog,
2506 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2507 paddress (gdbarch, addr),
2508 gdb_signal_to_symbol_string (siggnal), step);
2509
2510 if (non_stop)
2511 /* In non-stop, each thread is handled individually. The context
2512 must already be set to the right thread here. */
2513 ;
2514 else
2515 {
2516 struct thread_info *step_over;
2517
2518 /* In a multi-threaded task we may select another thread and
2519 then continue or step.
2520
2521 But if the old thread was stopped at a breakpoint, it will
2522 immediately cause another breakpoint stop without any
2523 execution (i.e. it will report a breakpoint hit incorrectly).
2524 So we must step over it first.
2525
2526 Look for a thread other than the current (TP) that reported a
2527 breakpoint hit and hasn't been resumed yet since. */
2528 step_over = find_thread_needs_step_over (step, tp);
2529 if (step_over != NULL)
2530 {
2531 if (debug_infrun)
2532 fprintf_unfiltered (gdb_stdlog,
2533 "infrun: need to step-over [%s] first\n",
2534 target_pid_to_str (step_over->ptid));
2535
2536 /* Store the prev_pc for the stepping thread too, needed by
2537 switch_back_to_stepped_thread. */
2538 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2539 switch_to_thread (step_over->ptid);
2540 tp = step_over;
2541 }
2542 }
2543
2544 /* If we need to step over a breakpoint, and we're not using
2545 displaced stepping to do so, insert all breakpoints (watchpoints,
2546 etc.) but the one we're stepping over, step one instruction, and
2547 then re-insert the breakpoint when that step is finished. */
2548 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2549 {
2550 struct regcache *regcache = get_current_regcache ();
2551
2552 set_step_over_info (get_regcache_aspace (regcache),
2553 regcache_read_pc (regcache));
2554 }
2555 else
2556 clear_step_over_info ();
2557
2558 insert_breakpoints ();
2559
2560 tp->control.trap_expected = tp->stepping_over_breakpoint;
2561
2562 annotate_starting ();
2563
2564 /* Make sure that output from GDB appears before output from the
2565 inferior. */
2566 gdb_flush (gdb_stdout);
2567
2568 /* Refresh prev_pc value just prior to resuming. This used to be
2569 done in stop_waiting, however, setting prev_pc there did not handle
2570 scenarios such as inferior function calls or returning from
2571 a function via the return command. In those cases, the prev_pc
2572 value was not set properly for subsequent commands. The prev_pc value
2573 is used to initialize the starting line number in the ecs. With an
2574 invalid value, the gdb next command ends up stopping at the position
2575 represented by the next line table entry past our start position.
2576 On platforms that generate one line table entry per line, this
2577 is not a problem. However, on the ia64, the compiler generates
2578 extraneous line table entries that do not increase the line number.
2579 When we issue the gdb next command on the ia64 after an inferior call
2580 or a return command, we often end up a few instructions forward, still
2581 within the original line we started in.
2582
2583 An attempt was made to refresh the prev_pc at the same time the
2584 execution_control_state is initialized (for instance, just before
2585 waiting for an inferior event). But this approach did not work
2586 because of platforms that use ptrace, where the pc register cannot
2587 be read unless the inferior is stopped. At that point, we are not
2588 guaranteed the inferior is stopped and so the regcache_read_pc() call
2589 can fail. Setting the prev_pc value here ensures the value is updated
2590 correctly when the inferior is stopped. */
2591 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2592
2593 /* Reset to normal state. */
2594 init_infwait_state ();
2595
2596 /* Resume inferior. */
2597 resume (tp->control.trap_expected || step || bpstat_should_step (),
2598 tp->suspend.stop_signal);
2599
2600 /* Wait for it to stop (if not standalone)
2601 and in any case decode why it stopped, and act accordingly. */
2602 /* Do this only if we are not using the event loop, or if the target
2603 does not support asynchronous execution. */
2604 if (!target_can_async_p ())
2605 {
2606 wait_for_inferior ();
2607 normal_stop ();
2608 }
2609 }
2610 \f
2611
2612 /* Start remote-debugging of a machine over a serial link. */
2613
2614 void
2615 start_remote (int from_tty)
2616 {
2617 struct inferior *inferior;
2618
2619 inferior = current_inferior ();
2620 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2621
2622 /* Always go on waiting for the target, regardless of the mode. */
2623 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2624 indicate to wait_for_inferior that a target should timeout if
2625 nothing is returned (instead of just blocking). Because of this,
2626 targets expecting an immediate response need to, internally, set
2627 things up so that the target_wait() is forced to eventually
2628 timeout. */
2629 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2630 differentiate to its caller what the state of the target is after
2631 the initial open has been performed. Here we're assuming that
2632 the target has stopped. It should be possible to eventually have
2633 target_open() return to the caller an indication that the target
2634 is currently running and GDB state should be set to the same as
2635 for an async run. */
2636 wait_for_inferior ();
2637
2638 /* Now that the inferior has stopped, do any bookkeeping like
2639 loading shared libraries. We want to do this before normal_stop,
2640 so that the displayed frame is up to date. */
2641 post_create_inferior (&current_target, from_tty);
2642
2643 normal_stop ();
2644 }
2645
2646 /* Initialize static vars when a new inferior begins. */
2647
2648 void
2649 init_wait_for_inferior (void)
2650 {
2651 /* These are meaningless until the first time through wait_for_inferior. */
2652
2653 breakpoint_init_inferior (inf_starting);
2654
2655 clear_proceed_status (0);
2656
2657 target_last_wait_ptid = minus_one_ptid;
2658
2659 previous_inferior_ptid = inferior_ptid;
2660 init_infwait_state ();
2661
2662 /* Discard any skipped inlined frames. */
2663 clear_inline_frame_state (minus_one_ptid);
2664
2665 singlestep_ptid = null_ptid;
2666 singlestep_pc = 0;
2667 }
2668
2669 \f
2670 /* This enum encodes possible reasons for doing a target_wait, so that
2671 wfi can call target_wait in one place. (Ultimately the call will be
2672 moved out of the infinite loop entirely.) */
2673
2674 enum infwait_states
2675 {
2676 infwait_normal_state,
2677 infwait_step_watch_state,
2678 infwait_nonstep_watch_state
2679 };
2680
2681 /* The PTID we'll do a target_wait on. */
2682 ptid_t waiton_ptid;
2683
2684 /* Current inferior wait state. */
2685 static enum infwait_states infwait_state;
2686
2687 /* Data to be passed around while handling an event. This data is
2688 discarded between events. */
2689 struct execution_control_state
2690 {
2691 ptid_t ptid;
2692 /* The thread that got the event, if this was a thread event; NULL
2693 otherwise. */
2694 struct thread_info *event_thread;
2695
2696 struct target_waitstatus ws;
2697 int stop_func_filled_in;
2698 CORE_ADDR stop_func_start;
2699 CORE_ADDR stop_func_end;
2700 const char *stop_func_name;
2701 int wait_some_more;
2702
2703 /* We were in infwait_step_watch_state or
2704 infwait_nonstep_watch_state state, and the thread reported an
2705 event. */
2706 int stepped_after_stopped_by_watchpoint;
2707
2708 /* True if the event thread hit the single-step breakpoint of
2709 another thread. Thus the event doesn't cause a stop, the thread
2710 needs to be single-stepped past the single-step breakpoint before
2711 we can switch back to the original stepping thread. */
2712 int hit_singlestep_breakpoint;
2713 };
2714
2715 static void handle_inferior_event (struct execution_control_state *ecs);
2716
2717 static void handle_step_into_function (struct gdbarch *gdbarch,
2718 struct execution_control_state *ecs);
2719 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2720 struct execution_control_state *ecs);
2721 static void handle_signal_stop (struct execution_control_state *ecs);
2722 static void check_exception_resume (struct execution_control_state *,
2723 struct frame_info *);
2724
2725 static void end_stepping_range (struct execution_control_state *ecs);
2726 static void stop_waiting (struct execution_control_state *ecs);
2727 static void prepare_to_wait (struct execution_control_state *ecs);
2728 static void keep_going (struct execution_control_state *ecs);
2729 static void process_event_stop_test (struct execution_control_state *ecs);
2730 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2731
2732 /* Callback for iterate over threads. If the thread is stopped, but
2733 the user/frontend doesn't know about that yet, go through
2734 normal_stop, as if the thread had just stopped now. ARG points at
2735 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2736 ptid_is_pid(PTID) is true, applies to all threads of the process
2737 pointed at by PTID. Otherwise, applies only to the thread pointed at by
2738 PTID. */
2739
2740 static int
2741 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2742 {
2743 ptid_t ptid = * (ptid_t *) arg;
2744
2745 if ((ptid_equal (info->ptid, ptid)
2746 || ptid_equal (minus_one_ptid, ptid)
2747 || (ptid_is_pid (ptid)
2748 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2749 && is_running (info->ptid)
2750 && !is_executing (info->ptid))
2751 {
2752 struct cleanup *old_chain;
2753 struct execution_control_state ecss;
2754 struct execution_control_state *ecs = &ecss;
2755
2756 memset (ecs, 0, sizeof (*ecs));
2757
2758 old_chain = make_cleanup_restore_current_thread ();
2759
2760 overlay_cache_invalid = 1;
2761 /* Flush target cache before starting to handle each event.
2762 Target was running and cache could be stale. This is just a
2763 heuristic. Running threads may modify target memory, but we
2764 don't get any event. */
2765 target_dcache_invalidate ();
2766
2767 /* Go through handle_inferior_event/normal_stop, so we always
2768 have consistent output as if the stop event had been
2769 reported. */
2770 ecs->ptid = info->ptid;
2771 ecs->event_thread = find_thread_ptid (info->ptid);
2772 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2773 ecs->ws.value.sig = GDB_SIGNAL_0;
2774
2775 handle_inferior_event (ecs);
2776
2777 if (!ecs->wait_some_more)
2778 {
2779 struct thread_info *tp;
2780
2781 normal_stop ();
2782
2783 /* Finish off the continuations. */
2784 tp = inferior_thread ();
2785 do_all_intermediate_continuations_thread (tp, 1);
2786 do_all_continuations_thread (tp, 1);
2787 }
2788
2789 do_cleanups (old_chain);
2790 }
2791
2792 return 0;
2793 }
2794
2795 /* This function is attached as a "thread_stop_requested" observer.
2796 Cleanup local state that assumed the PTID was to be resumed, and
2797 report the stop to the frontend. */
2798
2799 static void
2800 infrun_thread_stop_requested (ptid_t ptid)
2801 {
2802 struct displaced_step_inferior_state *displaced;
2803
2804 /* PTID was requested to stop. Remove it from the displaced
2805 stepping queue, so we don't try to resume it automatically. */
2806
2807 for (displaced = displaced_step_inferior_states;
2808 displaced;
2809 displaced = displaced->next)
2810 {
2811 struct displaced_step_request *it, **prev_next_p;
2812
2813 it = displaced->step_request_queue;
2814 prev_next_p = &displaced->step_request_queue;
2815 while (it)
2816 {
2817 if (ptid_match (it->ptid, ptid))
2818 {
2819 *prev_next_p = it->next;
2820 it->next = NULL;
2821 xfree (it);
2822 }
2823 else
2824 {
2825 prev_next_p = &it->next;
2826 }
2827
2828 it = *prev_next_p;
2829 }
2830 }
2831
2832 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2833 }
2834
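/* Observer for thread exit events.  Forget the cached last-wait ptid if
   it referred to the exiting thread TP.  */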
2835 static void
2836 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2837 {
2838 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2839 nullify_last_target_wait_ptid ();
2840 }
2841
2842 /* Callback for iterate_over_threads. */
2843
2844 static int
2845 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2846 {
2847 if (is_exited (info->ptid))
2848 return 0;
2849
2850 delete_step_resume_breakpoint (info);
2851 delete_exception_resume_breakpoint (info);
2852 return 0;
2853 }
2854
2855 /* In all-stop, delete the step resume breakpoint of any thread that
2856 had one. In non-stop, delete the step resume breakpoint of the
2857 thread that just stopped. */
2858
2859 static void
2860 delete_step_thread_step_resume_breakpoint (void)
2861 {
2862 if (!target_has_execution
2863 || ptid_equal (inferior_ptid, null_ptid))
2864 /* If the inferior has exited, we have already deleted the step
2865 resume breakpoints out of GDB's lists. */
2866 return;
2867
2868 if (non_stop)
2869 {
2870 /* If in non-stop mode, only delete the step-resume or
2871 longjmp-resume breakpoint of the thread that just stopped
2872 stepping. */
2873 struct thread_info *tp = inferior_thread ();
2874
2875 delete_step_resume_breakpoint (tp);
2876 delete_exception_resume_breakpoint (tp);
2877 }
2878 else
2879 /* In all-stop mode, delete all step-resume and longjmp-resume
2880 breakpoints of any thread that had them. */
2881 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2882 }
2883
2884 /* A cleanup wrapper. */
2885
2886 static void
2887 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2888 {
2889 delete_step_thread_step_resume_breakpoint ();
2890 }
2891
2892 /* Pretty print the results of target_wait, for debugging purposes. */
2893
2894 static void
2895 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2896 const struct target_waitstatus *ws)
2897 {
2898 char *status_string = target_waitstatus_to_string (ws);
2899 struct ui_file *tmp_stream = mem_fileopen ();
2900 char *text;
2901
2902 /* The text is split over several lines because it was getting too long.
2903 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2904 output as a unit; we want only one timestamp printed if debug_timestamp
2905 is set. */
2906
2907 fprintf_unfiltered (tmp_stream,
2908 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2909 if (ptid_get_pid (waiton_ptid) != -1)
2910 fprintf_unfiltered (tmp_stream,
2911 " [%s]", target_pid_to_str (waiton_ptid));
2912 fprintf_unfiltered (tmp_stream, ", status) =\n");
2913 fprintf_unfiltered (tmp_stream,
2914 "infrun: %d [%s],\n",
2915 ptid_get_pid (result_ptid),
2916 target_pid_to_str (result_ptid));
2917 fprintf_unfiltered (tmp_stream,
2918 "infrun: %s\n",
2919 status_string);
2920
2921 text = ui_file_xstrdup (tmp_stream, NULL);
2922
2923 /* This uses %s in part to handle %'s in the text, but also to avoid
2924 a gcc error: the format attribute requires a string literal. */
2925 fprintf_unfiltered (gdb_stdlog, "%s", text);
2926
2927 xfree (status_string);
2928 xfree (text);
2929 ui_file_delete (tmp_stream);
2930 }
2931
2932 /* Prepare and stabilize the inferior for detaching it. E.g.,
2933 detaching while a thread is displaced stepping is a recipe for
2934 crashing it, as nothing would readjust the PC out of the scratch
2935 pad. */
2936
2937 void
2938 prepare_for_detach (void)
2939 {
2940 struct inferior *inf = current_inferior ();
2941 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2942 struct cleanup *old_chain_1;
2943 struct displaced_step_inferior_state *displaced;
2944
2945 displaced = get_displaced_stepping_state (inf->pid);
2946
2947 /* Is any thread of this process displaced stepping? If not,
2948 there's nothing else to do. */
2949 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2950 return;
2951
2952 if (debug_infrun)
2953 fprintf_unfiltered (gdb_stdlog,
2954 "displaced-stepping in-process while detaching");
2955
2956 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2957 inf->detaching = 1;
2958
2959 while (!ptid_equal (displaced->step_ptid, null_ptid))
2960 {
2961 struct cleanup *old_chain_2;
2962 struct execution_control_state ecss;
2963 struct execution_control_state *ecs;
2964
2965 ecs = &ecss;
2966 memset (ecs, 0, sizeof (*ecs));
2967
2968 overlay_cache_invalid = 1;
2969 /* Flush target cache before starting to handle each event.
2970 Target was running and cache could be stale. This is just a
2971 heuristic. Running threads may modify target memory, but we
2972 don't get any event. */
2973 target_dcache_invalidate ();
2974
2975 if (deprecated_target_wait_hook)
2976 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2977 else
2978 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2979
2980 if (debug_infrun)
2981 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2982
2983 /* If an error happens while handling the event, propagate GDB's
2984 knowledge of the executing state to the frontend/user running
2985 state. */
2986 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2987 &minus_one_ptid);
2988
2989 /* Now figure out what to do with the result. */
2990 handle_inferior_event (ecs);
2991
2992 /* No error, don't finish the state yet. */
2993 discard_cleanups (old_chain_2);
2994
2995 /* Breakpoints and watchpoints are not installed on the target
2996 at this point, and signals are passed directly to the
2997 inferior, so this must mean the process is gone. */
2998 if (!ecs->wait_some_more)
2999 {
3000 discard_cleanups (old_chain_1);
3001 error (_("Program exited while detaching"));
3002 }
3003 }
3004
3005 discard_cleanups (old_chain_1);
3006 }
3007
3008 /* Wait for control to return from inferior to debugger.
3009
3010 If inferior gets a signal, we may decide to start it up again
3011 instead of returning. That is why there is a loop in this function.
3012 When this function actually returns it means the inferior
3013 should be left stopped and GDB should read more commands. */
3014
3015 void
3016 wait_for_inferior (void)
3017 {
3018 struct cleanup *old_cleanups;
3019
3020 if (debug_infrun)
3021 fprintf_unfiltered
3022 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3023
3024 old_cleanups =
3025 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
3026
3027 while (1)
3028 {
3029 struct execution_control_state ecss;
3030 struct execution_control_state *ecs = &ecss;
3031 struct cleanup *old_chain;
3032
3033 memset (ecs, 0, sizeof (*ecs));
3034
3035 overlay_cache_invalid = 1;
3036
3037 /* Flush target cache before starting to handle each event.
3038 Target was running and cache could be stale. This is just a
3039 heuristic. Running threads may modify target memory, but we
3040 don't get any event. */
3041 target_dcache_invalidate ();
3042
3043 if (deprecated_target_wait_hook)
3044 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3045 else
3046 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3047
3048 if (debug_infrun)
3049 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3050
3051 /* If an error happens while handling the event, propagate GDB's
3052 knowledge of the executing state to the frontend/user running
3053 state. */
3054 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3055
3056 /* Now figure out what to do with the result. */
3057 handle_inferior_event (ecs);
3058
3059 /* No error, don't finish the state yet. */
3060 discard_cleanups (old_chain);
3061
3062 if (!ecs->wait_some_more)
3063 break;
3064 }
3065
3066 do_cleanups (old_cleanups);
3067 }
3068
3069 /* Asynchronous version of wait_for_inferior. It is called by the
3070 event loop whenever a change of state is detected on the file
3071 descriptor corresponding to the target. It can be called more than
3072 once to complete a single execution command. In such cases we need
3073 to keep the state in a global variable ECSS. If it is the last time
3074 that this function is called for a single execution command, then
3075 report to the user that the inferior has stopped, and do the
3076 necessary cleanups. */
3077
3078 void
3079 fetch_inferior_event (void *client_data)
3080 {
3081 struct execution_control_state ecss;
3082 struct execution_control_state *ecs = &ecss;
3083 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3084 struct cleanup *ts_old_chain;
3085 int was_sync = sync_execution;
3086 int cmd_done = 0;
3087
3088 memset (ecs, 0, sizeof (*ecs));
3089
3090 /* We're handling a live event, so make sure we're doing live
3091 debugging. If we're looking at traceframes while the target is
3092 running, we're going to need to get back to that mode after
3093 handling the event. */
3094 if (non_stop)
3095 {
3096 make_cleanup_restore_current_traceframe ();
3097 set_current_traceframe (-1);
3098 }
3099
3100 if (non_stop)
3101 /* In non-stop mode, the user/frontend should not notice a thread
3102 switch due to internal events. Make sure we revert to the
3103 user selected thread and frame after handling the event and
3104 running any breakpoint commands. */
3105 make_cleanup_restore_current_thread ();
3106
3107 overlay_cache_invalid = 1;
3108 /* Flush target cache before starting to handle each event. Target
3109 was running and cache could be stale. This is just a heuristic.
3110 Running threads may modify target memory, but we don't get any
3111 event. */
3112 target_dcache_invalidate ();
3113
3114 make_cleanup_restore_integer (&execution_direction);
3115 execution_direction = target_execution_direction ();
3116
3117 if (deprecated_target_wait_hook)
3118 ecs->ptid =
3119 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3120 else
3121 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3122
3123 if (debug_infrun)
3124 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3125
3126 /* If an error happens while handling the event, propagate GDB's
3127 knowledge of the executing state to the frontend/user running
3128 state. */
3129 if (!non_stop)
3130 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3131 else
3132 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3133
3134 /* This gets executed before make_cleanup_restore_current_thread above,
3135 so it still applies to the thread which has thrown the exception. */
3136 make_bpstat_clear_actions_cleanup ();
3137
3138 /* Now figure out what to do with the result. */
3139 handle_inferior_event (ecs);
3140
3141 if (!ecs->wait_some_more)
3142 {
3143 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3144
3145 delete_step_thread_step_resume_breakpoint ();
3146
3147 /* We may not find an inferior if this was a process exit. */
3148 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3149 normal_stop ();
3150
3151 if (target_has_execution
3152 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3153 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3154 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3155 && ecs->event_thread->step_multi
3156 && ecs->event_thread->control.stop_step)
3157 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3158 else
3159 {
3160 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3161 cmd_done = 1;
3162 }
3163 }
3164
3165 /* No error, don't finish the thread states yet. */
3166 discard_cleanups (ts_old_chain);
3167
3168 /* Revert thread and frame. */
3169 do_cleanups (old_chain);
3170
3171 /* If the inferior was in sync execution mode, and now isn't,
3172 restore the prompt (a synchronous execution command has finished,
3173 and we're ready for input). */
3174 if (interpreter_async && was_sync && !sync_execution)
3175 observer_notify_sync_execution_done ();
3176
3177 if (cmd_done
3178 && !was_sync
3179 && exec_done_display_p
3180 && (ptid_equal (inferior_ptid, null_ptid)
3181 || !is_running (inferior_ptid)))
3182 printf_unfiltered (_("completed.\n"));
3183 }
3184
3185 /* Record the frame and location we're currently stepping through. */
3186 void
3187 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3188 {
3189 struct thread_info *tp = inferior_thread ();
3190
3191 tp->control.step_frame_id = get_frame_id (frame);
3192 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3193
3194 tp->current_symtab = sal.symtab;
3195 tp->current_line = sal.line;
3196 }
3197
3198 /* Clear context switchable stepping state. */
3199
3200 void
3201 init_thread_stepping_state (struct thread_info *tss)
3202 {
3203 tss->stepping_over_breakpoint = 0;
3204 tss->step_after_step_resume_breakpoint = 0;
3205 }
3206
3207 /* Set the cached copy of the last ptid/waitstatus. */
3208
3209 static void
3210 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3211 {
3212 target_last_wait_ptid = ptid;
3213 target_last_waitstatus = status;
3214 }
3215
3216 /* Return the cached copy of the last pid/waitstatus returned by
3217 target_wait()/deprecated_target_wait_hook(). The data is actually
3218 cached by handle_inferior_event(), which gets called immediately
3219 after target_wait()/deprecated_target_wait_hook(). */
3220
3221 void
3222 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3223 {
3224 *ptidp = target_last_wait_ptid;
3225 *status = target_last_waitstatus;
3226 }
3227
3228 void
3229 nullify_last_target_wait_ptid (void)
3230 {
3231 target_last_wait_ptid = minus_one_ptid;
3232 }
3233
3234 /* Switch thread contexts. */
3235
3236 static void
3237 context_switch (ptid_t ptid)
3238 {
3239 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3240 {
3241 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3242 target_pid_to_str (inferior_ptid));
3243 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3244 target_pid_to_str (ptid));
3245 }
3246
3247 switch_to_thread (ptid);
3248 }
3249
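/* If the SIGTRAP that ECS reports was caused by hitting a software
   breakpoint on a target that leaves the PC past the breakpoint
   (gdbarch_decr_pc_after_break), back the PC up so that it points at
   the breakpoint address.  */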
3250 static void
3251 adjust_pc_after_break (struct execution_control_state *ecs)
3252 {
3253 struct regcache *regcache;
3254 struct gdbarch *gdbarch;
3255 struct address_space *aspace;
3256 CORE_ADDR breakpoint_pc, decr_pc;
3257
3258 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3259 we aren't, just return.
3260
3261 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3262 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3263 implemented by software breakpoints should be handled through the normal
3264 breakpoint layer.
3265
3266 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3267 different signals (SIGILL or SIGEMT for instance), but it is less
3268 clear where the PC is pointing afterwards. It may not match
3269 gdbarch_decr_pc_after_break. I don't know any specific target that
3270 generates these signals at breakpoints (the code has been in GDB since at
3271 least 1992) so I can not guess how to handle them here.
3272
3273 In earlier versions of GDB, a target with
3274 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3275 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3276 target with both of these set in GDB history, and it seems unlikely to be
3277 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3278
3279 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3280 return;
3281
3282 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3283 return;
3284
3285 /* In reverse execution, when a breakpoint is hit, the instruction
3286 under it has already been de-executed. The reported PC always
3287 points at the breakpoint address, so adjusting it further would
3288 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3289 architecture:
3290
3291 B1 0x08000000 : INSN1
3292 B2 0x08000001 : INSN2
3293 0x08000002 : INSN3
3294 PC -> 0x08000003 : INSN4
3295
3296 Say you're stopped at 0x08000003 as above. Reverse continuing
3297 from that point should hit B2 as below. Reading the PC when the
3298 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3299 been de-executed already.
3300
3301 B1 0x08000000 : INSN1
3302 B2 PC -> 0x08000001 : INSN2
3303 0x08000002 : INSN3
3304 0x08000003 : INSN4
3305
3306 We can't apply the same logic as for forward execution, because
3307 we would wrongly adjust the PC to 0x08000000, since there's a
3308 breakpoint at PC - 1. We'd then report a hit on B1, although
3309 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3310 behaviour. */
3311 if (execution_direction == EXEC_REVERSE)
3312 return;
3313
3314 /* If this target does not decrement the PC after breakpoints, then
3315 we have nothing to do. */
3316 regcache = get_thread_regcache (ecs->ptid);
3317 gdbarch = get_regcache_arch (regcache);
3318
3319 decr_pc = target_decr_pc_after_break (gdbarch);
3320 if (decr_pc == 0)
3321 return;
3322
3323 aspace = get_regcache_aspace (regcache);
3324
3325 /* Find the location where (if we've hit a breakpoint) the
3326 breakpoint would be. */
3327 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3328
3329 /* Check whether there actually is a software breakpoint inserted at
3330 that location.
3331
3332 If in non-stop mode, a race condition is possible where we've
3333 removed a breakpoint, but stop events for that breakpoint were
3334 already queued and arrive later. To suppress those spurious
3335 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3336 and retire them after a number of stop events are reported. */
3337 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3338 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3339 {
3340 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3341
3342 if (record_full_is_used ())
3343 record_full_gdb_operation_disable_set ();
3344
3345 /* When using hardware single-step, a SIGTRAP is reported for both
3346 a completed single-step and a software breakpoint. Need to
3347 differentiate between the two, as the latter needs adjusting
3348 but the former does not.
3349
3350 The SIGTRAP can be due to a completed hardware single-step only if
3351 - we didn't insert software single-step breakpoints
3352 - the thread to be examined is still the current thread
3353 - this thread is currently being stepped
3354
3355 If any of these events did not occur, we must have stopped due
3356 to hitting a software breakpoint, and have to back up to the
3357 breakpoint address.
3358
3359 As a special case, we could have hardware single-stepped a
3360 software breakpoint. In this case (prev_pc == breakpoint_pc),
3361 we also need to back up to the breakpoint address. */
3362
3363 if (singlestep_breakpoints_inserted_p
3364 || !ptid_equal (ecs->ptid, inferior_ptid)
3365 || !currently_stepping (ecs->event_thread)
3366 || ecs->event_thread->prev_pc == breakpoint_pc)
3367 regcache_write_pc (regcache, breakpoint_pc);
3368
3369 do_cleanups (old_cleanups);
3370 }
3371 }
3372
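/* Reset the inferior-wait state machine: forget any thread we were
   specifically waiting on and go back to the normal wait state.  */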
3373 static void
3374 init_infwait_state (void)
3375 {
3376 waiton_ptid = pid_to_ptid (-1);
3377 infwait_state = infwait_normal_state;
3378 }
3379
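/* Return non-zero if the frame identified by STEP_FRAME_ID is a caller
   of FRAME, walking only through inlined frames (the walk stops at the
   first non-inline caller, which is still checked).  */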
3380 static int
3381 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3382 {
3383 for (frame = get_prev_frame (frame);
3384 frame != NULL;
3385 frame = get_prev_frame (frame))
3386 {
3387 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3388 return 1;
3389 if (get_frame_type (frame) != INLINE_FRAME)
3390 break;
3391 }
3392
3393 return 0;
3394 }
3395
3396 /* Auxiliary function that handles syscall entry/return events.
3397 It returns 1 if the inferior should keep going (and GDB
3398 should ignore the event), or 0 if the event deserves to be
3399 processed. */
3400
3401 static int
3402 handle_syscall_event (struct execution_control_state *ecs)
3403 {
3404 struct regcache *regcache;
3405 int syscall_number;
3406
3407 if (!ptid_equal (ecs->ptid, inferior_ptid))
3408 context_switch (ecs->ptid);
3409
3410 regcache = get_thread_regcache (ecs->ptid);
3411 syscall_number = ecs->ws.value.syscall_number;
3412 stop_pc = regcache_read_pc (regcache);
3413
3414 if (catch_syscall_enabled () > 0
3415 && catching_syscall_number (syscall_number) > 0)
3416 {
3417 if (debug_infrun)
3418 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3419 syscall_number);
3420
3421 ecs->event_thread->control.stop_bpstat
3422 = bpstat_stop_status (get_regcache_aspace (regcache),
3423 stop_pc, ecs->ptid, &ecs->ws);
3424
3425 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3426 {
3427 /* Catchpoint hit. */
3428 return 0;
3429 }
3430 }
3431
3432 /* If no catchpoint triggered for this, then keep going. */
3433 keep_going (ecs);
3434 return 1;
3435 }
3436
3437 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3438
3439 static void
3440 fill_in_stop_func (struct gdbarch *gdbarch,
3441 struct execution_control_state *ecs)
3442 {
3443 if (!ecs->stop_func_filled_in)
3444 {
3445 /* Don't care about return value; stop_func_start and stop_func_name
3446 will both be 0 if it doesn't work. */
3447 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3448 &ecs->stop_func_start, &ecs->stop_func_end);
3449 ecs->stop_func_start
3450 += gdbarch_deprecated_function_start_offset (gdbarch);
3451
3452 if (gdbarch_skip_entrypoint_p (gdbarch))
3453 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3454 ecs->stop_func_start);
3455
3456 ecs->stop_func_filled_in = 1;
3457 }
3458 }
3459
3460
3461 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3462
3463 static enum stop_kind
3464 get_inferior_stop_soon (ptid_t ptid)
3465 {
3466 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3467
3468 gdb_assert (inf != NULL);
3469 return inf->control.stop_soon;
3470 }
3471
3472 /* Given an execution control state that has been freshly filled in by
3473 an event from the inferior, figure out what it means and take
3474 appropriate action.
3475
3476 The alternatives are:
3477
3478 1) stop_waiting and return; to really stop and return to the
3479 debugger.
3480
3481 2) keep_going and return; to wait for the next event (set
3482 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3483 once). */
3484
3485 static void
3486 handle_inferior_event (struct execution_control_state *ecs)
3487 {
3488 enum stop_kind stop_soon;
3489
3490 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3491 {
3492 /* We had an event in the inferior, but we are not interested in
3493 handling it at this level. The lower layers have already
3494 done what needs to be done, if anything.
3495
3496 One of the possible circumstances for this is when the
3497 inferior produces output for the console. The inferior has
3498 not stopped, and we are ignoring the event. Another possible
3499 circumstance is any event which the lower level knows will be
3500 reported multiple times without an intervening resume. */
3501 if (debug_infrun)
3502 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3503 prepare_to_wait (ecs);
3504 return;
3505 }
3506
3507 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3508 && target_can_async_p () && !sync_execution)
3509 {
3510 /* There were no unwaited-for children left in the target, but
3511 we're not synchronously waiting for events either. Just
3512 ignore. Otherwise, if we were running a synchronous
3513 execution command, we need to cancel it and give the user
3514 back the terminal. */
3515 if (debug_infrun)
3516 fprintf_unfiltered (gdb_stdlog,
3517 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3518 prepare_to_wait (ecs);
3519 return;
3520 }
3521
3522 /* Cache the last pid/waitstatus. */
3523 set_last_target_status (ecs->ptid, ecs->ws);
3524
3525 /* Always clear state belonging to the previous time we stopped. */
3526 stop_stack_dummy = STOP_NONE;
3527
3528 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3529 {
3530 /* No unwaited-for children left. IOW, all resumed children
3531 have exited. */
3532 if (debug_infrun)
3533 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3534
3535 stop_print_frame = 0;
3536 stop_waiting (ecs);
3537 return;
3538 }
3539
3540 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3541 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3542 {
3543 ecs->event_thread = find_thread_ptid (ecs->ptid);
3544 /* If it's a new thread, add it to the thread database. */
3545 if (ecs->event_thread == NULL)
3546 ecs->event_thread = add_thread (ecs->ptid);
3547
3548 /* Disable range stepping. If the next step request could use a
3549 range, this will end up re-enabled then. */
3550 ecs->event_thread->control.may_range_step = 0;
3551 }
3552
3553 /* Dependent on valid ECS->EVENT_THREAD. */
3554 adjust_pc_after_break (ecs);
3555
3556 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3557 reinit_frame_cache ();
3558
3559 breakpoint_retire_moribund ();
3560
3561 /* First, distinguish signals caused by the debugger from signals
3562 that have to do with the program's own actions. Note that
3563 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3564 on the operating system version. Here we detect when a SIGILL or
3565 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3566 something similar for SIGSEGV, since a SIGSEGV will be generated
3567 when we're trying to execute a breakpoint instruction on a
3568 non-executable stack. This happens for call dummy breakpoints
3569 for architectures like SPARC that place call dummies on the
3570 stack. */
3571 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3572 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3573 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3574 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3575 {
3576 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3577
3578 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3579 regcache_read_pc (regcache)))
3580 {
3581 if (debug_infrun)
3582 fprintf_unfiltered (gdb_stdlog,
3583 "infrun: Treating signal as SIGTRAP\n");
3584 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3585 }
3586 }
3587
3588 /* Mark the non-executing threads accordingly. In all-stop, all
3589 threads of all processes are stopped when we get any event
3590 reported. In non-stop mode, only the event thread stops. If
3591 we're handling a process exit in non-stop mode, there's nothing
3592 to do, as threads of the dead process are gone, and threads of
3593 any other process were left running. */
3594 if (!non_stop)
3595 set_executing (minus_one_ptid, 0);
3596 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3597 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3598 set_executing (ecs->ptid, 0);
3599
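  /* If the previous resume was a single-step over a watchpoint hit
     (see the watchpoint handling in handle_signal_stop), record that
     here so we do not attempt to step over the watchpoint again.  */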
3600 switch (infwait_state)
3601 {
3602 case infwait_normal_state:
3603 if (debug_infrun)
3604 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3605 break;
3606
3607 case infwait_step_watch_state:
3608 if (debug_infrun)
3609 fprintf_unfiltered (gdb_stdlog,
3610 "infrun: infwait_step_watch_state\n");
3611
3612 ecs->stepped_after_stopped_by_watchpoint = 1;
3613 break;
3614
3615 case infwait_nonstep_watch_state:
3616 if (debug_infrun)
3617 fprintf_unfiltered (gdb_stdlog,
3618 "infrun: infwait_nonstep_watch_state\n");
3619 insert_breakpoints ();
3620
3621 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3622 handle things like signals arriving and other things happening
3623 in combination correctly? */
3624 ecs->stepped_after_stopped_by_watchpoint = 1;
3625 break;
3626
3627 default:
3628 internal_error (__FILE__, __LINE__, _("bad switch"));
3629 }
3630
3631 infwait_state = infwait_normal_state;
3632 waiton_ptid = pid_to_ptid (-1);
3633
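  /* Dispatch on the kind of event the target reported.  Each case
     either stops and returns control to the user (stop_waiting),
     resumes the target and waits for the next event (keep_going /
     prepare_to_wait), or hands off to handle_signal_stop /
     process_event_stop_test for further analysis.  */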
3634 switch (ecs->ws.kind)
3635 {
3636 case TARGET_WAITKIND_LOADED:
3637 if (debug_infrun)
3638 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3639 if (!ptid_equal (ecs->ptid, inferior_ptid))
3640 context_switch (ecs->ptid);
3641 /* Ignore this gracefully during startup of the inferior, as it
3642 might be the shell which has just loaded some objects; otherwise,
3643 add the symbols for the newly loaded objects. Also ignore at
3644 the beginning of an attach or remote session; we will query
3645 the full list of libraries once the connection is
3646 established. */
3647
3648 stop_soon = get_inferior_stop_soon (ecs->ptid);
3649 if (stop_soon == NO_STOP_QUIETLY)
3650 {
3651 struct regcache *regcache;
3652
3653 regcache = get_thread_regcache (ecs->ptid);
3654
3655 handle_solib_event ();
3656
3657 ecs->event_thread->control.stop_bpstat
3658 = bpstat_stop_status (get_regcache_aspace (regcache),
3659 stop_pc, ecs->ptid, &ecs->ws);
3660
3661 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3662 {
3663 /* A catchpoint triggered. */
3664 process_event_stop_test (ecs);
3665 return;
3666 }
3667
3668 /* If requested, stop when the dynamic linker notifies
3669 gdb of events. This allows the user to get control
3670 and place breakpoints in initializer routines for
3671 dynamically loaded objects (among other things). */
3672 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3673 if (stop_on_solib_events)
3674 {
3675 /* Make sure we print "Stopped due to solib-event" in
3676 normal_stop. */
3677 stop_print_frame = 1;
3678
3679 stop_waiting (ecs);
3680 return;
3681 }
3682 }
3683
3684 /* If we are skipping through a shell, or through shared library
3685 loading that we aren't interested in, resume the program. If
3686 we're running the program normally, also resume. */
3687 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3688 {
3689 /* Loading of shared libraries might have changed breakpoint
3690 addresses. Make sure new breakpoints are inserted. */
3691 if (stop_soon == NO_STOP_QUIETLY)
3692 insert_breakpoints ();
3693 resume (0, GDB_SIGNAL_0);
3694 prepare_to_wait (ecs);
3695 return;
3696 }
3697
3698 /* But stop if we're attaching or setting up a remote
3699 connection. */
3700 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3701 || stop_soon == STOP_QUIETLY_REMOTE)
3702 {
3703 if (debug_infrun)
3704 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3705 stop_waiting (ecs);
3706 return;
3707 }
3708
3709 internal_error (__FILE__, __LINE__,
3710 _("unhandled stop_soon: %d"), (int) stop_soon);
3711
3712 case TARGET_WAITKIND_SPURIOUS:
3713 if (debug_infrun)
3714 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3715 if (!ptid_equal (ecs->ptid, inferior_ptid))
3716 context_switch (ecs->ptid);
3717 resume (0, GDB_SIGNAL_0);
3718 prepare_to_wait (ecs);
3719 return;
3720
3721 case TARGET_WAITKIND_EXITED:
3722 case TARGET_WAITKIND_SIGNALLED:
3723 if (debug_infrun)
3724 {
3725 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3726 fprintf_unfiltered (gdb_stdlog,
3727 "infrun: TARGET_WAITKIND_EXITED\n");
3728 else
3729 fprintf_unfiltered (gdb_stdlog,
3730 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3731 }
3732
3733 inferior_ptid = ecs->ptid;
3734 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3735 set_current_program_space (current_inferior ()->pspace);
3736 handle_vfork_child_exec_or_exit (0);
3737 target_terminal_ours (); /* Must do this before mourn anyway. */
3738
3739 /* Clear any previous state of convenience variables. */
3740 clear_exit_convenience_vars ();
3741
3742 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3743 {
3744 /* Record the exit code in the convenience variable $_exitcode, so
3745 that the user can inspect this again later. */
3746 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3747 (LONGEST) ecs->ws.value.integer);
3748
3749 /* Also record this in the inferior itself. */
3750 current_inferior ()->has_exit_code = 1;
3751 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3752
3753 /* Support the --return-child-result option. */
3754 return_child_result_value = ecs->ws.value.integer;
3755
3756 observer_notify_exited (ecs->ws.value.integer);
3757 }
3758 else
3759 {
3760 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3761 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3762
3763 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3764 {
3765 /* Set the value of the internal variable $_exitsignal,
3766 which holds the signal uncaught by the inferior. */
3767 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3768 gdbarch_gdb_signal_to_target (gdbarch,
3769 ecs->ws.value.sig));
3770 }
3771 else
3772 {
3773 /* We don't have access to the target's method used for
3774 converting between signal numbers (GDB's internal
3775 representation <-> target's representation).
3776 Therefore, we cannot do a good job at displaying this
3777 information to the user. It's better to just warn
3778 her about it (if infrun debugging is enabled), and
3779 give up. */
3780 if (debug_infrun)
3781 fprintf_filtered (gdb_stdlog, _("\
3782 Cannot fill $_exitsignal with the correct signal number.\n"));
3783 }
3784
3785 observer_notify_signal_exited (ecs->ws.value.sig);
3786 }
3787
3788 gdb_flush (gdb_stdout);
3789 target_mourn_inferior ();
3790 singlestep_breakpoints_inserted_p = 0;
3791 cancel_single_step_breakpoints ();
3792 stop_print_frame = 0;
3793 stop_waiting (ecs);
3794 return;
3795
3796 /* The following are the only cases in which we keep going;
3797 the above cases end in a continue or goto. */
3798 case TARGET_WAITKIND_FORKED:
3799 case TARGET_WAITKIND_VFORKED:
3800 if (debug_infrun)
3801 {
3802 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3803 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3804 else
3805 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3806 }
3807
3808 /* Check whether the inferior is displaced stepping. */
3809 {
3810 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3811 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3812 struct displaced_step_inferior_state *displaced
3813 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3814
3815 /* If displaced stepping is in use, and thread ecs->ptid is the
3816 one currently being displaced stepped, fix up its state now. */
3817 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3818 {
3819 struct inferior *parent_inf
3820 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3821 struct regcache *child_regcache;
3822 CORE_ADDR parent_pc;
3823
3824 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3825 indicating that the displaced stepping of syscall instruction
3826 has been done. Perform cleanup for parent process here. Note
3827 that this operation also cleans up the child process for vfork,
3828 because their pages are shared. */
3829 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3830
3831 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3832 {
3833 /* Restore scratch pad for child process. */
3834 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3835 }
3836
3837 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3838 the child's PC is also within the scratchpad. Set the child's PC
3839 to the parent's PC value, which has already been fixed up.
3840 FIXME: we use the parent's aspace here, although we're touching
3841 the child, because the child hasn't been added to the inferior
3842 list yet at this point. */
3843
3844 child_regcache
3845 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3846 gdbarch,
3847 parent_inf->aspace);
3848 /* Read PC value of parent process. */
3849 parent_pc = regcache_read_pc (regcache);
3850
3851 if (debug_displaced)
3852 fprintf_unfiltered (gdb_stdlog,
3853 "displaced: write child pc from %s to %s\n",
3854 paddress (gdbarch,
3855 regcache_read_pc (child_regcache)),
3856 paddress (gdbarch, parent_pc));
3857
3858 regcache_write_pc (child_regcache, parent_pc);
3859 }
3860 }
3861
3862 if (!ptid_equal (ecs->ptid, inferior_ptid))
3863 context_switch (ecs->ptid);
3864
3865 /* Immediately detach breakpoints from the child before there's
3866 any chance of letting the user delete breakpoints from the
3867 breakpoint lists. If we don't do this early, it's easy to
3868 leave leftover traps in the child, viz.: "break foo; catch
3869 fork; c; <fork>; del; c; <child calls foo>". We only follow
3870 the fork on the last `continue', and by that time the
3871 breakpoint at "foo" is long gone from the breakpoint table.
3872 If we vforked, then we don't need to unpatch here, since both
3873 parent and child are sharing the same memory pages; we'll
3874 need to unpatch at follow/detach time instead to be certain
3875 that new breakpoints added between catchpoint hit time and
3876 vfork follow are detached. */
3877 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3878 {
3879 /* This won't actually modify the breakpoint list, but will
3880 physically remove the breakpoints from the child. */
3881 detach_breakpoints (ecs->ws.value.related_pid);
3882 }
3883
3884 if (singlestep_breakpoints_inserted_p)
3885 {
3886 /* Pull the single step breakpoints out of the target. */
3887 remove_single_step_breakpoints ();
3888 singlestep_breakpoints_inserted_p = 0;
3889 }
3890
3891 /* In case the event is caught by a catchpoint, remember that
3892 the event is to be followed at the next resume of the thread,
3893 and not immediately. */
3894 ecs->event_thread->pending_follow = ecs->ws;
3895
3896 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3897
3898 ecs->event_thread->control.stop_bpstat
3899 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3900 stop_pc, ecs->ptid, &ecs->ws);
3901
3902 /* If no catchpoint triggered for this, then keep going. Note
3903 that we're interested in knowing the bpstat actually causes a
3904 stop, not just if it may explain the signal. Software
3905 watchpoints, for example, always appear in the bpstat. */
3906 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3907 {
3908 ptid_t parent;
3909 ptid_t child;
3910 int should_resume;
3911 int follow_child
3912 = (follow_fork_mode_string == follow_fork_mode_child);
3913
3914 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3915
3916 should_resume = follow_fork ();
3917
3918 parent = ecs->ptid;
3919 child = ecs->ws.value.related_pid;
3920
3921 /* In non-stop mode, also resume the other branch. */
3922 if (non_stop && !detach_fork)
3923 {
3924 if (follow_child)
3925 switch_to_thread (parent);
3926 else
3927 switch_to_thread (child);
3928
3929 ecs->event_thread = inferior_thread ();
3930 ecs->ptid = inferior_ptid;
3931 keep_going (ecs);
3932 }
3933
3934 if (follow_child)
3935 switch_to_thread (child);
3936 else
3937 switch_to_thread (parent);
3938
3939 ecs->event_thread = inferior_thread ();
3940 ecs->ptid = inferior_ptid;
3941
3942 if (should_resume)
3943 keep_going (ecs);
3944 else
3945 stop_waiting (ecs);
3946 return;
3947 }
3948 process_event_stop_test (ecs);
3949 return;
3950
3951 case TARGET_WAITKIND_VFORK_DONE:
3952 /* Done with the shared memory region. Re-insert breakpoints in
3953 the parent, and keep going. */
3954
3955 if (debug_infrun)
3956 fprintf_unfiltered (gdb_stdlog,
3957 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3958
3959 if (!ptid_equal (ecs->ptid, inferior_ptid))
3960 context_switch (ecs->ptid);
3961
3962 current_inferior ()->waiting_for_vfork_done = 0;
3963 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3964 /* This also takes care of reinserting breakpoints in the
3965 previously locked inferior. */
3966 keep_going (ecs);
3967 return;
3968
3969 case TARGET_WAITKIND_EXECD:
3970 if (debug_infrun)
3971 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3972
3973 if (!ptid_equal (ecs->ptid, inferior_ptid))
3974 context_switch (ecs->ptid);
3975
3976 singlestep_breakpoints_inserted_p = 0;
3977 cancel_single_step_breakpoints ();
3978
3979 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3980
3981 /* Do whatever is necessary to the parent branch of the vfork. */
3982 handle_vfork_child_exec_or_exit (1);
3983
3984 /* This causes the eventpoints and symbol table to be reset.
3985 Must do this now, before trying to determine whether to
3986 stop. */
3987 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3988
3989 ecs->event_thread->control.stop_bpstat
3990 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3991 stop_pc, ecs->ptid, &ecs->ws);
3992
3993 /* Note that this may be referenced from inside
3994 bpstat_stop_status above, through inferior_has_execd. */
3995 xfree (ecs->ws.value.execd_pathname);
3996 ecs->ws.value.execd_pathname = NULL;
3997
3998 /* If no catchpoint triggered for this, then keep going. */
3999 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
4000 {
4001 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4002 keep_going (ecs);
4003 return;
4004 }
4005 process_event_stop_test (ecs);
4006 return;
4007
4008 /* Be careful not to try to gather much state about a thread
4009 that's in a syscall. It's frequently a losing proposition. */
4010 case TARGET_WAITKIND_SYSCALL_ENTRY:
4011 if (debug_infrun)
4012 fprintf_unfiltered (gdb_stdlog,
4013 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
4014 /* Get the current syscall number. */
4015 if (handle_syscall_event (ecs) == 0)
4016 process_event_stop_test (ecs);
4017 return;
4018
4019 /* Before examining the threads further, step this thread to
4020 get it entirely out of the syscall. (We get notice of the
4021 event when the thread is just on the verge of exiting a
4022 syscall. Stepping one instruction seems to get it back
4023 into user code.) */
4024 case TARGET_WAITKIND_SYSCALL_RETURN:
4025 if (debug_infrun)
4026 fprintf_unfiltered (gdb_stdlog,
4027 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4028 if (handle_syscall_event (ecs) == 0)
4029 process_event_stop_test (ecs);
4030 return;
4031
4032 case TARGET_WAITKIND_STOPPED:
4033 if (debug_infrun)
4034 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4035 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4036 handle_signal_stop (ecs);
4037 return;
4038
4039 case TARGET_WAITKIND_NO_HISTORY:
4040 if (debug_infrun)
4041 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4042 /* Reverse execution: target ran out of history info. */
4043
4044 /* Pull the single step breakpoints out of the target. */
4045 if (singlestep_breakpoints_inserted_p)
4046 {
4047 if (!ptid_equal (ecs->ptid, inferior_ptid))
4048 context_switch (ecs->ptid);
4049 remove_single_step_breakpoints ();
4050 singlestep_breakpoints_inserted_p = 0;
4051 }
4052 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4053 observer_notify_no_history ();
4054 stop_waiting (ecs);
4055 return;
4056 }
4057 }
4058
4059 /* Come here when the program has stopped with a signal. */
4060
4061 static void
4062 handle_signal_stop (struct execution_control_state *ecs)
4063 {
4064 struct frame_info *frame;
4065 struct gdbarch *gdbarch;
4066 int stopped_by_watchpoint;
4067 enum stop_kind stop_soon;
4068 int random_signal;
4069
4070 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4071
4072 /* Do we need to clean up the state of a thread that has
4073 completed a displaced single-step? (Doing so usually affects
4074 the PC, so do it here, before we set stop_pc.) */
4075 displaced_step_fixup (ecs->ptid,
4076 ecs->event_thread->suspend.stop_signal);
4077
4078 /* If we either finished a single-step or hit a breakpoint, but
4079 the user wanted this thread to be stopped, pretend we got a
4080 SIG0 (generic unsignaled stop). */
4081 if (ecs->event_thread->stop_requested
4082 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4083 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4084
4085 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4086
4087 if (debug_infrun)
4088 {
4089 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4090 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4091 struct cleanup *old_chain = save_inferior_ptid ();
4092
4093 inferior_ptid = ecs->ptid;
4094
4095 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4096 paddress (gdbarch, stop_pc));
4097 if (target_stopped_by_watchpoint ())
4098 {
4099 CORE_ADDR addr;
4100
4101 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4102
4103 if (target_stopped_data_address (&current_target, &addr))
4104 fprintf_unfiltered (gdb_stdlog,
4105 "infrun: stopped data address = %s\n",
4106 paddress (gdbarch, addr));
4107 else
4108 fprintf_unfiltered (gdb_stdlog,
4109 "infrun: (no data address available)\n");
4110 }
4111
4112 do_cleanups (old_chain);
4113 }
4114
4115 /* This originates from start_remote(), start_inferior() and
4116 shared library hook functions. */
4117 stop_soon = get_inferior_stop_soon (ecs->ptid);
4118 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4119 {
4120 if (!ptid_equal (ecs->ptid, inferior_ptid))
4121 context_switch (ecs->ptid);
4122 if (debug_infrun)
4123 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4124 stop_print_frame = 1;
4125 stop_waiting (ecs);
4126 return;
4127 }
4128
4129 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4130 && stop_after_trap)
4131 {
4132 if (!ptid_equal (ecs->ptid, inferior_ptid))
4133 context_switch (ecs->ptid);
4134 if (debug_infrun)
4135 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4136 stop_print_frame = 0;
4137 stop_waiting (ecs);
4138 return;
4139 }
4140
4141 /* This originates from attach_command(). We need to overwrite
4142 the stop_signal here, because some kernels don't ignore a
4143 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4144 See more comments in inferior.h. On the other hand, if we
4145 get a non-SIGSTOP, report it to the user - assume the backend
4146 will handle the SIGSTOP if it should show up later.
4147
4148 Also consider that the attach is complete when we see a
4149 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4150 target extended-remote report it instead of a SIGSTOP
4151 (e.g. gdbserver). We already rely on SIGTRAP being our
4152 signal, so this is no exception.
4153
4154 Also consider that the attach is complete when we see a
4155 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4156 the target to stop all threads of the inferior, in case the
4157 low level attach operation doesn't stop them implicitly. If
4158 they weren't stopped implicitly, then the stub will report a
4159 GDB_SIGNAL_0, meaning: stopped for no particular reason
4160 other than GDB's request. */
4161 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4162 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4163 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4164 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4165 {
4166 stop_print_frame = 1;
4167 stop_waiting (ecs);
4168 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4169 return;
4170 }
4171
4172 /* See if something interesting happened to the non-current thread. If
4173 so, then switch to that thread. */
4174 if (!ptid_equal (ecs->ptid, inferior_ptid))
4175 {
4176 if (debug_infrun)
4177 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4178
4179 context_switch (ecs->ptid);
4180
4181 if (deprecated_context_hook)
4182 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4183 }
4184
4185 /* At this point, get hold of the now-current thread's frame. */
4186 frame = get_current_frame ();
4187 gdbarch = get_frame_arch (frame);
4188
4189 /* Pull the single step breakpoints out of the target. */
4190 if (singlestep_breakpoints_inserted_p)
4191 {
4192 /* However, before doing so, if this single-step breakpoint was
4193 actually for another thread, set this thread up for moving
4194 past it. */
4195 if (!ptid_equal (ecs->ptid, singlestep_ptid)
4196 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4197 {
4198 struct regcache *regcache;
4199 struct address_space *aspace;
4200 CORE_ADDR pc;
4201
4202 regcache = get_thread_regcache (ecs->ptid);
4203 aspace = get_regcache_aspace (regcache);
4204 pc = regcache_read_pc (regcache);
4205 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4206 {
4207 if (debug_infrun)
4208 {
4209 fprintf_unfiltered (gdb_stdlog,
4210 "infrun: [%s] hit step over single-step"
4211 " breakpoint of [%s]\n",
4212 target_pid_to_str (ecs->ptid),
4213 target_pid_to_str (singlestep_ptid));
4214 }
4215 ecs->hit_singlestep_breakpoint = 1;
4216 }
4217 }
4218
4219 remove_single_step_breakpoints ();
4220 singlestep_breakpoints_inserted_p = 0;
4221 }
4222
4223 if (ecs->stepped_after_stopped_by_watchpoint)
4224 stopped_by_watchpoint = 0;
4225 else
4226 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4227
4228 /* If necessary, step over this watchpoint. We'll be back to display
4229 it in a moment. */
4230 if (stopped_by_watchpoint
4231 && (target_have_steppable_watchpoint
4232 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4233 {
4234 /* At this point, we are stopped at an instruction which has
4235 attempted to write to a piece of memory under control of
4236 a watchpoint. The instruction hasn't actually executed
4237 yet. If we were to evaluate the watchpoint expression
4238 now, we would get the old value, and therefore no change
4239 would seem to have occurred.
4240
4241 In order to make watchpoints work `right', we really need
4242 to complete the memory write, and then evaluate the
4243 watchpoint expression. We do this by single-stepping the
4244 target.
4245
4246 It may not be necessary to disable the watchpoint to step over
4247 it. For example, the PA can (with some kernel cooperation)
4248 single step over a watchpoint without disabling the watchpoint.
4249
4250 It is far more common to need to disable a watchpoint to step
4251 the inferior over it. If we have non-steppable watchpoints,
4252 we must disable the current watchpoint; it's simplest to
4253 disable all watchpoints and breakpoints. */
4254 int hw_step = 1;
4255
4256 if (!target_have_steppable_watchpoint)
4257 {
4258 remove_breakpoints ();
4259 /* See comment in resume why we need to stop bypassing signals
4260 while breakpoints have been removed. */
4261 target_pass_signals (0, NULL);
4262 }
4263 /* Single step */
4264 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4265 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
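      /* Remember which thread we stepped and that a watchpoint step is
	 pending, so the next event is recognized as the completion of
	 this step (see the infwait_state switch in
	 handle_inferior_event).  */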
4266 waiton_ptid = ecs->ptid;
4267 if (target_have_steppable_watchpoint)
4268 infwait_state = infwait_step_watch_state;
4269 else
4270 infwait_state = infwait_nonstep_watch_state;
4271 prepare_to_wait (ecs);
4272 return;
4273 }
4274
4275 ecs->event_thread->stepping_over_breakpoint = 0;
4276 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4277 ecs->event_thread->control.stop_step = 0;
4278 stop_print_frame = 1;
4279 stopped_by_random_signal = 0;
4280
4281 /* Hide inlined functions starting here, unless we just performed stepi or
4282 nexti. After stepi and nexti, always show the innermost frame (not any
4283 inline function call sites). */
4284 if (ecs->event_thread->control.step_range_end != 1)
4285 {
4286 struct address_space *aspace =
4287 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4288
4289 /* skip_inline_frames is expensive, so we avoid it if we can
4290 determine that the address is one where functions cannot have
4291 been inlined. This improves performance with inferiors that
4292 load a lot of shared libraries, because the solib event
4293 breakpoint is defined as the address of a function (i.e. not
4294 inline). Note that we have to check the previous PC as well
4295 as the current one to catch cases when we have just
4296 single-stepped off a breakpoint prior to reinstating it.
4297 Note that we're assuming that the code we single-step to is
4298 not inline, but that's not definitive: there's nothing
4299 preventing the event breakpoint function from containing
4300 inlined code, and the single-step ending up there. If the
4301 user had set a breakpoint on that inlined code, the missing
4302 skip_inline_frames call would break things. Fortunately
4303 that's an extremely unlikely scenario. */
4304 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4305 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4306 && ecs->event_thread->control.trap_expected
4307 && pc_at_non_inline_function (aspace,
4308 ecs->event_thread->prev_pc,
4309 &ecs->ws)))
4310 {
4311 skip_inline_frames (ecs->ptid);
4312
4313 /* Re-fetch current thread's frame in case that invalidated
4314 the frame cache. */
4315 frame = get_current_frame ();
4316 gdbarch = get_frame_arch (frame);
4317 }
4318 }
4319
4320 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4321 && ecs->event_thread->control.trap_expected
4322 && gdbarch_single_step_through_delay_p (gdbarch)
4323 && currently_stepping (ecs->event_thread))
4324 {
4325 /* We're trying to step off a breakpoint. Turns out that we're
4326 also on an instruction that needs to be stepped multiple
4327 times before it has been fully executed. E.g., architectures
4328 with a delay slot. It needs to be stepped twice, once for
4329 the instruction and once for the delay slot. */
4330 int step_through_delay
4331 = gdbarch_single_step_through_delay (gdbarch, frame);
4332
4333 if (debug_infrun && step_through_delay)
4334 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4335 if (ecs->event_thread->control.step_range_end == 0
4336 && step_through_delay)
4337 {
4338 /* The user issued a continue when stopped at a breakpoint.
4339 Set up for another trap and get out of here. */
4340 ecs->event_thread->stepping_over_breakpoint = 1;
4341 keep_going (ecs);
4342 return;
4343 }
4344 else if (step_through_delay)
4345 {
4346 /* The user issued a step when stopped at a breakpoint.
4347 Maybe we should stop, maybe we should not - the delay
4348 slot *might* correspond to a line of source. In any
4349 case, don't decide that here, just set
4350 ecs->stepping_over_breakpoint, making sure we
4351 single-step again before breakpoints are re-inserted. */
4352 ecs->event_thread->stepping_over_breakpoint = 1;
4353 }
4354 }
4355
4356 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4357 handles this event. */
4358 ecs->event_thread->control.stop_bpstat
4359 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4360 stop_pc, ecs->ptid, &ecs->ws);
4361
4362 /* The following is needed in case a breakpoint condition
4363 called a function. */
4364 stop_print_frame = 1;
4365
4366 /* This is where we handle "moribund" watchpoints. Unlike
4367 software breakpoints traps, hardware watchpoint traps are
4368 always distinguishable from random traps. If no high-level
4369 watchpoint is associated with the reported stop data address
4370 anymore, then the bpstat does not explain the signal ---
4371 simply make sure to ignore it if `stopped_by_watchpoint' is
4372 set. */
4373
4374 if (debug_infrun
4375 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4376 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4377 GDB_SIGNAL_TRAP)
4378 && stopped_by_watchpoint)
4379 fprintf_unfiltered (gdb_stdlog,
4380 "infrun: no user watchpoint explains "
4381 "watchpoint SIGTRAP, ignoring\n");
4382
4383 /* NOTE: cagney/2003-03-29: These checks for a random signal
4384 at one stage in the past included checks for an inferior
4385 function call's call dummy's return breakpoint. The original
4386 comment, that went with the test, read:
4387
4388 ``End of a stack dummy. Some systems (e.g. Sony news) give
4389 another signal besides SIGTRAP, so check here as well as
4390 above.''
4391
4392 If someone ever tries to get call dummies on a
4393 non-executable stack to work (where the target would stop
4394 with something like a SIGSEGV), then those tests might need
4395 to be re-instated. Given, however, that the tests were only
4396 enabled when momentary breakpoints were not being used, I
4397 suspect that it won't be the case.
4398
4399 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4400 be necessary for call dummies on a non-executable stack on
4401 SPARC. */
4402
4403 /* See if the breakpoints module can explain the signal. */
4404 random_signal
4405 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4406 ecs->event_thread->suspend.stop_signal);
4407
4408 /* If not, perhaps stepping/nexting can. */
4409 if (random_signal)
4410 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4411 && currently_stepping (ecs->event_thread));
4412
4413 /* Perhaps the thread hit a single-step breakpoint of _another_
4414 thread. Single-step breakpoints are transparent to the
4415 breakpoints module. */
4416 if (random_signal)
4417 random_signal = !ecs->hit_singlestep_breakpoint;
4418
4419 /* No? Perhaps we got a moribund watchpoint. */
4420 if (random_signal)
4421 random_signal = !stopped_by_watchpoint;
4422
4423 /* For the program's own signals, act according to
4424 the signal handling tables. */
4425
4426 if (random_signal)
4427 {
4428 /* Signal not for debugging purposes. */
4429 int printed = 0;
4430 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4431 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4432
4433 if (debug_infrun)
4434 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4435 gdb_signal_to_symbol_string (stop_signal));
4436
4437 stopped_by_random_signal = 1;
4438
4439 if (signal_print[ecs->event_thread->suspend.stop_signal])
4440 {
4441 /* The signal table tells us to print about this signal. */
4442 printed = 1;
4443 target_terminal_ours_for_output ();
4444 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4445 }
4446 /* Always stop on signals if we're either just gaining control
4447 of the program, or the user explicitly requested this thread
4448 to remain stopped. */
4449 if (stop_soon != NO_STOP_QUIETLY
4450 || ecs->event_thread->stop_requested
4451 || (!inf->detaching
4452 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4453 {
4454 stop_waiting (ecs);
4455 return;
4456 }
4457 /* If not going to stop, give terminal back
4458 if we took it away. */
4459 else if (printed)
4460 target_terminal_inferior ();
4461
4462 /* Clear the signal if it should not be passed. */
4463 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4464 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4465
4466 if (ecs->event_thread->prev_pc == stop_pc
4467 && ecs->event_thread->control.trap_expected
4468 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4469 {
4470 /* We were just starting a new sequence, attempting to
4471 single-step off of a breakpoint and expecting a SIGTRAP.
4472 Instead this signal arrives. This signal will take us out
4473 of the stepping range so GDB needs to remember to, when
4474 the signal handler returns, resume stepping off that
4475 breakpoint. */
4476 /* To simplify things, "continue" is forced to use the same
4477 code paths as single-step - set a breakpoint at the
4478 signal return address and then, once hit, step off that
4479 breakpoint. */
4480 if (debug_infrun)
4481 fprintf_unfiltered (gdb_stdlog,
4482 "infrun: signal arrived while stepping over "
4483 "breakpoint\n");
4484
4485 insert_hp_step_resume_breakpoint_at_frame (frame);
4486 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4487 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4488 ecs->event_thread->control.trap_expected = 0;
4489
4490 /* If we were nexting/stepping some other thread, switch to
4491 it, so that we don't continue it, losing control. */
4492 if (!switch_back_to_stepped_thread (ecs))
4493 keep_going (ecs);
4494 return;
4495 }
4496
4497 if (ecs->event_thread->control.step_range_end != 0
4498 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4499 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4500 && frame_id_eq (get_stack_frame_id (frame),
4501 ecs->event_thread->control.step_stack_frame_id)
4502 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4503 {
4504 /* The inferior is about to take a signal that will take it
4505 out of the single step range. Set a breakpoint at the
4506 current PC (which is presumably where the signal handler
4507 will eventually return) and then allow the inferior to
4508 run free.
4509
4510 Note that this is only needed for a signal delivered
4511 while in the single-step range. Nested signals aren't a
4512 problem as they eventually all return. */
4513 if (debug_infrun)
4514 fprintf_unfiltered (gdb_stdlog,
4515 "infrun: signal may take us out of "
4516 "single-step range\n");
4517
4518 insert_hp_step_resume_breakpoint_at_frame (frame);
4519 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4520 ecs->event_thread->control.trap_expected = 0;
4521 keep_going (ecs);
4522 return;
4523 }
4524
4525 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4526 when either there's a nested signal, or when there's a
4527 pending signal enabled just as the signal handler returns
4528 (leaving the inferior at the step-resume-breakpoint without
4529 actually executing it). Either way continue until the
4530 breakpoint is really hit. */
4531
4532 if (!switch_back_to_stepped_thread (ecs))
4533 {
4534 if (debug_infrun)
4535 fprintf_unfiltered (gdb_stdlog,
4536 "infrun: random signal, keep going\n");
4537
4538 keep_going (ecs);
4539 }
4540 return;
4541 }
4542
4543 process_event_stop_test (ecs);
4544 }
4545
4546 /* Come here when we've got some debug event / signal we can explain
4547 (IOW, not a random signal), and test whether it should cause a
4548 stop, or whether we should resume the inferior (transparently).
4549 E.g., could be a breakpoint whose condition evaluates false; we
4550 could be still stepping within the line; etc. */
4551
4552 static void
4553 process_event_stop_test (struct execution_control_state *ecs)
4554 {
4555 struct symtab_and_line stop_pc_sal;
4556 struct frame_info *frame;
4557 struct gdbarch *gdbarch;
4558 CORE_ADDR jmp_buf_pc;
4559 struct bpstat_what what;
4560
4561 /* Handle cases caused by hitting a breakpoint. */
4562
4563 frame = get_current_frame ();
4564 gdbarch = get_frame_arch (frame);
4565
4566 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4567
4568 if (what.call_dummy)
4569 {
4570 stop_stack_dummy = what.call_dummy;
4571 }
4572
4573 /* If we hit an internal event that triggers symbol changes, the
4574 current frame will be invalidated within bpstat_what (e.g., if we
4575 hit an internal solib event). Re-fetch it. */
4576 frame = get_current_frame ();
4577 gdbarch = get_frame_arch (frame);
4578
4579 switch (what.main_action)
4580 {
4581 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4582 /* If we hit the breakpoint at longjmp while stepping, we
4583 install a momentary breakpoint at the target of the
4584 jmp_buf. */
4585
4586 if (debug_infrun)
4587 fprintf_unfiltered (gdb_stdlog,
4588 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4589
4590 ecs->event_thread->stepping_over_breakpoint = 1;
4591
4592 if (what.is_longjmp)
4593 {
4594 struct value *arg_value;
4595
4596 /* If we set the longjmp breakpoint via a SystemTap probe,
4597 then use it to extract the arguments. The destination PC
4598 is the third argument to the probe. */
4599 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4600 if (arg_value)
4601 jmp_buf_pc = value_as_address (arg_value);
4602 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4603 || !gdbarch_get_longjmp_target (gdbarch,
4604 frame, &jmp_buf_pc))
4605 {
4606 if (debug_infrun)
4607 fprintf_unfiltered (gdb_stdlog,
4608 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4609 "(!gdbarch_get_longjmp_target)\n");
4610 keep_going (ecs);
4611 return;
4612 }
4613
4614 /* Insert a breakpoint at resume address. */
4615 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4616 }
4617 else
4618 check_exception_resume (ecs, frame);
4619 keep_going (ecs);
4620 return;
4621
4622 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4623 {
4624 struct frame_info *init_frame;
4625
4626 /* There are several cases to consider.
4627
4628 1. The initiating frame no longer exists. In this case we
4629 must stop, because the exception or longjmp has gone too
4630 far.
4631
4632 2. The initiating frame exists, and is the same as the
4633 current frame. We stop, because the exception or longjmp
4634 has been caught.
4635
4636 3. The initiating frame exists and is different from the
4637 current frame. This means the exception or longjmp has
4638 been caught beneath the initiating frame, so keep going.
4639
4640 4. longjmp breakpoint has been placed just to protect
4641 against stale dummy frames and user is not interested in
4642 stopping around longjmps. */
4643
4644 if (debug_infrun)
4645 fprintf_unfiltered (gdb_stdlog,
4646 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4647
4648 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4649 != NULL);
4650 delete_exception_resume_breakpoint (ecs->event_thread);
4651
4652 if (what.is_longjmp)
4653 {
4654 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4655
4656 if (!frame_id_p (ecs->event_thread->initiating_frame))
4657 {
4658 /* Case 4. */
4659 keep_going (ecs);
4660 return;
4661 }
4662 }
4663
4664 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4665
4666 if (init_frame)
4667 {
4668 struct frame_id current_id
4669 = get_frame_id (get_current_frame ());
4670 if (frame_id_eq (current_id,
4671 ecs->event_thread->initiating_frame))
4672 {
4673 /* Case 2. Fall through. */
4674 }
4675 else
4676 {
4677 /* Case 3. */
4678 keep_going (ecs);
4679 return;
4680 }
4681 }
4682
4683 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4684 exists. */
4685 delete_step_resume_breakpoint (ecs->event_thread);
4686
4687 end_stepping_range (ecs);
4688 }
4689 return;
4690
4691 case BPSTAT_WHAT_SINGLE:
4692 if (debug_infrun)
4693 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4694 ecs->event_thread->stepping_over_breakpoint = 1;
4695 /* Still need to check other stuff, at least the case where we
4696 are stepping and step out of the right range. */
4697 break;
4698
4699 case BPSTAT_WHAT_STEP_RESUME:
4700 if (debug_infrun)
4701 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4702
4703 delete_step_resume_breakpoint (ecs->event_thread);
4704 if (ecs->event_thread->control.proceed_to_finish
4705 && execution_direction == EXEC_REVERSE)
4706 {
4707 struct thread_info *tp = ecs->event_thread;
4708
4709 /* We are finishing a function in reverse, and just hit the
4710 step-resume breakpoint at the start address of the
4711 function, and we're almost there -- just need to back up
4712 by one more single-step, which should take us back to the
4713 function call. */
4714 tp->control.step_range_start = tp->control.step_range_end = 1;
4715 keep_going (ecs);
4716 return;
4717 }
4718 fill_in_stop_func (gdbarch, ecs);
4719 if (stop_pc == ecs->stop_func_start
4720 && execution_direction == EXEC_REVERSE)
4721 {
4722 /* We are stepping over a function call in reverse, and just
4723 hit the step-resume breakpoint at the start address of
4724 the function. Go back to single-stepping, which should
4725 take us back to the function call. */
4726 ecs->event_thread->stepping_over_breakpoint = 1;
4727 keep_going (ecs);
4728 return;
4729 }
4730 break;
4731
4732 case BPSTAT_WHAT_STOP_NOISY:
4733 if (debug_infrun)
4734 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4735 stop_print_frame = 1;
4736
4737 /* Assume the thread stopped for a breakpoint. We'll still check
4738 whether a/the breakpoint is there when the thread is next
4739 resumed. */
4740 ecs->event_thread->stepping_over_breakpoint = 1;
4741
4742 stop_waiting (ecs);
4743 return;
4744
4745 case BPSTAT_WHAT_STOP_SILENT:
4746 if (debug_infrun)
4747 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4748 stop_print_frame = 0;
4749
4750 /* Assume the thread stopped for a breakpoint. We'll still check
4751 whether a/the breakpoint is there when the thread is next
4752 resumed. */
4753 ecs->event_thread->stepping_over_breakpoint = 1;
4754 stop_waiting (ecs);
4755 return;
4756
4757 case BPSTAT_WHAT_HP_STEP_RESUME:
4758 if (debug_infrun)
4759 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4760
4761 delete_step_resume_breakpoint (ecs->event_thread);
4762 if (ecs->event_thread->step_after_step_resume_breakpoint)
4763 {
4764 /* Back when the step-resume breakpoint was inserted, we
4765 were trying to single-step off a breakpoint. Go back to
4766 doing that. */
4767 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4768 ecs->event_thread->stepping_over_breakpoint = 1;
4769 keep_going (ecs);
4770 return;
4771 }
4772 break;
4773
4774 case BPSTAT_WHAT_KEEP_CHECKING:
4775 break;
4776 }
4777
4778 /* We come here if we hit a breakpoint but should not stop for it.
4779 Possibly we also were stepping and should stop for that. So fall
4780 through and test for stepping. But, if not stepping, do not
4781 stop. */
4782
4783 /* In all-stop mode, if we're currently stepping but have stopped in
4784 some other thread, we need to switch back to the stepped thread. */
4785 if (switch_back_to_stepped_thread (ecs))
4786 return;
4787
4788 if (ecs->event_thread->control.step_resume_breakpoint)
4789 {
4790 if (debug_infrun)
4791 fprintf_unfiltered (gdb_stdlog,
4792 "infrun: step-resume breakpoint is inserted\n");
4793
4794 /* Having a step-resume breakpoint overrides anything
4795 else having to do with stepping commands until
4796 that breakpoint is reached. */
4797 keep_going (ecs);
4798 return;
4799 }
4800
4801 if (ecs->event_thread->control.step_range_end == 0)
4802 {
4803 if (debug_infrun)
4804 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4805 /* Likewise if we aren't even stepping. */
4806 keep_going (ecs);
4807 return;
4808 }
4809
4810 /* Re-fetch current thread's frame in case the code above caused
4811 the frame cache to be re-initialized, making our FRAME variable
4812 a dangling pointer. */
4813 frame = get_current_frame ();
4814 gdbarch = get_frame_arch (frame);
4815 fill_in_stop_func (gdbarch, ecs);
4816
4817 /* If stepping through a line, keep going if still within it.
4818
4819 Note that step_range_end is the address of the first instruction
4820 beyond the step range, and NOT the address of the last instruction
4821 within it!
4822
4823 Note also that during reverse execution, we may be stepping
4824 through a function epilogue and therefore must detect when
4825 the current-frame changes in the middle of a line. */
4826
4827 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4828 && (execution_direction != EXEC_REVERSE
4829 || frame_id_eq (get_frame_id (frame),
4830 ecs->event_thread->control.step_frame_id)))
4831 {
4832 if (debug_infrun)
4833 fprintf_unfiltered
4834 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4835 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4836 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4837
4838 /* Tentatively re-enable range stepping; `resume' disables it if
4839 necessary (e.g., if we're stepping over a breakpoint or we
4840 have software watchpoints). */
4841 ecs->event_thread->control.may_range_step = 1;
4842
4843 /* When stepping backward, stop at beginning of line range
4844 (unless it's the function entry point, in which case
4845 keep going back to the call point). */
4846 if (stop_pc == ecs->event_thread->control.step_range_start
4847 && stop_pc != ecs->stop_func_start
4848 && execution_direction == EXEC_REVERSE)
4849 end_stepping_range (ecs);
4850 else
4851 keep_going (ecs);
4852
4853 return;
4854 }
4855
4856 /* We stepped out of the stepping range. */
4857
4858 /* If we are stepping at the source level and entered the runtime
4859 loader dynamic symbol resolution code...
4860
4861 EXEC_FORWARD: we keep on single stepping until we exit the run
4862 time loader code and reach the callee's address.
4863
4864 EXEC_REVERSE: we've already executed the callee (backward), and
4865 the runtime loader code is handled just like any other
4866 undebuggable function call. Now we need only keep stepping
4867 backward through the trampoline code, and that's handled further
4868 down, so there is nothing for us to do here. */
4869
4870 if (execution_direction != EXEC_REVERSE
4871 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4872 && in_solib_dynsym_resolve_code (stop_pc))
4873 {
4874 CORE_ADDR pc_after_resolver =
4875 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4876
4877 if (debug_infrun)
4878 fprintf_unfiltered (gdb_stdlog,
4879 "infrun: stepped into dynsym resolve code\n");
4880
4881 if (pc_after_resolver)
4882 {
4883 /* Set up a step-resume breakpoint at the address
4884 indicated by SKIP_SOLIB_RESOLVER. */
4885 struct symtab_and_line sr_sal;
4886
4887 init_sal (&sr_sal);
4888 sr_sal.pc = pc_after_resolver;
4889 sr_sal.pspace = get_frame_program_space (frame);
4890
4891 insert_step_resume_breakpoint_at_sal (gdbarch,
4892 sr_sal, null_frame_id);
4893 }
4894
4895 keep_going (ecs);
4896 return;
4897 }
4898
4899 if (ecs->event_thread->control.step_range_end != 1
4900 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4901 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4902 && get_frame_type (frame) == SIGTRAMP_FRAME)
4903 {
4904 if (debug_infrun)
4905 fprintf_unfiltered (gdb_stdlog,
4906 "infrun: stepped into signal trampoline\n");
4907 /* The inferior, while doing a "step" or "next", has ended up in
4908 a signal trampoline (either by a signal being delivered or by
4909 the signal handler returning). Just single-step until the
4910 inferior leaves the trampoline (either by calling the handler
4911 or returning). */
4912 keep_going (ecs);
4913 return;
4914 }
4915
4916 /* If we're in the return path from a shared library trampoline,
4917 we want to proceed through the trampoline when stepping. */
4918 /* macro/2012-04-25: This needs to come before the subroutine
4919 call check below as on some targets return trampolines look
4920 like subroutine calls (MIPS16 return thunks). */
4921 if (gdbarch_in_solib_return_trampoline (gdbarch,
4922 stop_pc, ecs->stop_func_name)
4923 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4924 {
4925 /* Determine where this trampoline returns. */
4926 CORE_ADDR real_stop_pc;
4927
4928 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4929
4930 if (debug_infrun)
4931 fprintf_unfiltered (gdb_stdlog,
4932 "infrun: stepped into solib return tramp\n");
4933
4934 /* Only proceed through if we know where it's going. */
4935 if (real_stop_pc)
4936 {
4937 /* And put the step-breakpoint there and go until there. */
4938 struct symtab_and_line sr_sal;
4939
4940 init_sal (&sr_sal); /* initialize to zeroes */
4941 sr_sal.pc = real_stop_pc;
4942 sr_sal.section = find_pc_overlay (sr_sal.pc);
4943 sr_sal.pspace = get_frame_program_space (frame);
4944
4945 /* Do not specify what the fp should be when we stop since
4946 on some machines the prologue is where the new fp value
4947 is established. */
4948 insert_step_resume_breakpoint_at_sal (gdbarch,
4949 sr_sal, null_frame_id);
4950
4951 /* Restart without fiddling with the step ranges or
4952 other state. */
4953 keep_going (ecs);
4954 return;
4955 }
4956 }
4957
4958 /* Check for subroutine calls. The check for the current frame
4959 equalling the step ID is not necessary - the check of the
4960 previous frame's ID is sufficient - but it is a common case and
4961 cheaper than checking the previous frame's ID.
4962
4963 NOTE: frame_id_eq will never report two invalid frame IDs as
4964 being equal, so to get into this block, both the current and
4965 previous frame must have valid frame IDs. */
4966 /* The outer_frame_id check is a heuristic to detect stepping
4967 through startup code. If we step over an instruction which
4968 sets the stack pointer from an invalid value to a valid value,
4969 we may detect that as a subroutine call from the mythical
4970 "outermost" function. This could be fixed by marking
4971 outermost frames as !stack_p,code_p,special_p. Then the
4972 initial outermost frame, before sp was valid, would
4973 have code_addr == &_start. See the comment in frame_id_eq
4974 for more. */
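/* Informal restatement (added, not authoritative): the condition
   below treats this stop as "stepped into a subroutine" when the
   current frame's stack ID differs from the frame we were stepping
   in, yet the current frame's caller *is* that stepping frame --
   i.e. a new frame has been pushed on top of it -- and, when the
   stepping frame is the mythical outermost frame, we additionally
   require the stop PC to be in a different function, to sidestep the
   startup-code false positive described above.  */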
4975 if (!frame_id_eq (get_stack_frame_id (frame),
4976 ecs->event_thread->control.step_stack_frame_id)
4977 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4978 ecs->event_thread->control.step_stack_frame_id)
4979 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4980 outer_frame_id)
4981 || step_start_function != find_pc_function (stop_pc))))
4982 {
4983 CORE_ADDR real_stop_pc;
4984
4985 if (debug_infrun)
4986 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4987
4988 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4989 || ((ecs->event_thread->control.step_range_end == 1)
4990 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4991 ecs->stop_func_start)))
4992 {
4993 /* I presume that step_over_calls is only 0 when we're
4994 supposed to be stepping at the assembly language level
4995 ("stepi"). Just stop. */
4996 /* Also, maybe we just did a "nexti" inside a prolog, so we
4997 thought it was a subroutine call but it was not. Stop as
4998 well. FENN */
4999 /* And this works the same backward as frontward. MVS */
5000 end_stepping_range (ecs);
5001 return;
5002 }
5003
5004 /* Reverse stepping through solib trampolines. */
5005
5006 if (execution_direction == EXEC_REVERSE
5007 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5008 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5009 || (ecs->stop_func_start == 0
5010 && in_solib_dynsym_resolve_code (stop_pc))))
5011 {
5012 /* Any solib trampoline code can be handled in reverse
5013 by simply continuing to single-step. We have already
5014 executed the solib function (backwards), and a few
5015 steps will take us back through the trampoline to the
5016 caller. */
5017 keep_going (ecs);
5018 return;
5019 }
5020
5021 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5022 {
5023 /* We're doing a "next".
5024
5025 Normal (forward) execution: set a breakpoint at the
5026 callee's return address (the address at which the caller
5027 will resume).
5028
5029 Reverse (backward) execution: set the step-resume
5030 breakpoint at the start of the function that we just
5031 stepped into (backwards), and continue to there. When we
5032 get there, we'll need to single-step back to the caller. */
5033
5034 if (execution_direction == EXEC_REVERSE)
5035 {
5036 /* If we're already at the start of the function, we've either
5037 just stepped backward into a single instruction function,
5038 or stepped back out of a signal handler to the first instruction
5039 of the function. Just keep going, which will single-step back
5040 to the caller. */
5041 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5042 {
5043 struct symtab_and_line sr_sal;
5044
5045 /* Normal function call return (static or dynamic). */
5046 init_sal (&sr_sal);
5047 sr_sal.pc = ecs->stop_func_start;
5048 sr_sal.pspace = get_frame_program_space (frame);
5049 insert_step_resume_breakpoint_at_sal (gdbarch,
5050 sr_sal, null_frame_id);
5051 }
5052 }
5053 else
5054 insert_step_resume_breakpoint_at_caller (frame);
5055
5056 keep_going (ecs);
5057 return;
5058 }
5059
5060 /* If we are in a function call trampoline (a stub between the
5061 calling routine and the real function), locate the real
5062 function. That's what tells us (a) whether we want to step
5063 into it at all, and (b) what prologue we want to run to the
5064 end of, if we do step into it. */
5065 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5066 if (real_stop_pc == 0)
5067 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5068 if (real_stop_pc != 0)
5069 ecs->stop_func_start = real_stop_pc;
5070
5071 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5072 {
5073 struct symtab_and_line sr_sal;
5074
5075 init_sal (&sr_sal);
5076 sr_sal.pc = ecs->stop_func_start;
5077 sr_sal.pspace = get_frame_program_space (frame);
5078
5079 insert_step_resume_breakpoint_at_sal (gdbarch,
5080 sr_sal, null_frame_id);
5081 keep_going (ecs);
5082 return;
5083 }
5084
5085 /* If we have line number information for the function we are
5086 thinking of stepping into and the function isn't on the skip
5087 list, step into it.
5088
5089 If there are several symtabs at that PC (e.g. with include
5090 files), just want to know whether *any* of them have line
5091 numbers. find_pc_line handles this. */
5092 {
5093 struct symtab_and_line tmp_sal;
5094
5095 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5096 if (tmp_sal.line != 0
5097 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5098 &tmp_sal))
5099 {
5100 if (execution_direction == EXEC_REVERSE)
5101 handle_step_into_function_backward (gdbarch, ecs);
5102 else
5103 handle_step_into_function (gdbarch, ecs);
5104 return;
5105 }
5106 }
5107
5108 /* If we have no line number and the step-stop-if-no-debug is
5109 set, we stop the step so that the user has a chance to switch
5110 to assembly mode. */
5111 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5112 && step_stop_if_no_debug)
5113 {
5114 end_stepping_range (ecs);
5115 return;
5116 }
5117
5118 if (execution_direction == EXEC_REVERSE)
5119 {
5120 /* If we're already at the start of the function, we've either just
5121 stepped backward into a single instruction function without line
5122 number info, or stepped back out of a signal handler to the first
5123 instruction of the function without line number info. Just keep
5124 going, which will single-step back to the caller. */
5125 if (ecs->stop_func_start != stop_pc)
5126 {
5127 /* Set a breakpoint at callee's start address.
5128 From there we can step once and be back in the caller. */
5129 struct symtab_and_line sr_sal;
5130
5131 init_sal (&sr_sal);
5132 sr_sal.pc = ecs->stop_func_start;
5133 sr_sal.pspace = get_frame_program_space (frame);
5134 insert_step_resume_breakpoint_at_sal (gdbarch,
5135 sr_sal, null_frame_id);
5136 }
5137 }
5138 else
5139 /* Set a breakpoint at callee's return address (the address
5140 at which the caller will resume). */
5141 insert_step_resume_breakpoint_at_caller (frame);
5142
5143 keep_going (ecs);
5144 return;
5145 }
5146
5147 /* Reverse stepping through solib trampolines. */
5148
5149 if (execution_direction == EXEC_REVERSE
5150 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5151 {
5152 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5153 || (ecs->stop_func_start == 0
5154 && in_solib_dynsym_resolve_code (stop_pc)))
5155 {
5156 /* Any solib trampoline code can be handled in reverse
5157 by simply continuing to single-step. We have already
5158 executed the solib function (backwards), and a few
5159 steps will take us back through the trampoline to the
5160 caller. */
5161 keep_going (ecs);
5162 return;
5163 }
5164 else if (in_solib_dynsym_resolve_code (stop_pc))
5165 {
5166 /* Stepped backward into the solib dynsym resolver.
5167 Set a breakpoint at its start and continue, then
5168 one more step will take us out. */
5169 struct symtab_and_line sr_sal;
5170
5171 init_sal (&sr_sal);
5172 sr_sal.pc = ecs->stop_func_start;
5173 sr_sal.pspace = get_frame_program_space (frame);
5174 insert_step_resume_breakpoint_at_sal (gdbarch,
5175 sr_sal, null_frame_id);
5176 keep_going (ecs);
5177 return;
5178 }
5179 }
5180
5181 stop_pc_sal = find_pc_line (stop_pc, 0);
5182
5183 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5184 the trampoline processing logic; however, there are some trampolines
5185 that have no names, so we should do trampoline handling first. */
5186 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5187 && ecs->stop_func_name == NULL
5188 && stop_pc_sal.line == 0)
5189 {
5190 if (debug_infrun)
5191 fprintf_unfiltered (gdb_stdlog,
5192 "infrun: stepped into undebuggable function\n");
5193
5194 /* The inferior just stepped into, or returned to, an
5195 undebuggable function (where there is no debugging information
5196 and no line number corresponding to the address where the
5197 inferior stopped). Since we want to skip this kind of code,
5198 we keep going until the inferior returns from this
5199 function - unless the user has asked us not to (via
5200 set step-mode) or we no longer know how to get back
5201 to the call site. */
5202 if (step_stop_if_no_debug
5203 || !frame_id_p (frame_unwind_caller_id (frame)))
5204 {
5205 /* If we have no line number and the step-stop-if-no-debug
5206 is set, we stop the step so that the user has a chance to
5207 switch to assembly mode. */
5208 end_stepping_range (ecs);
5209 return;
5210 }
5211 else
5212 {
5213 /* Set a breakpoint at callee's return address (the address
5214 at which the caller will resume). */
5215 insert_step_resume_breakpoint_at_caller (frame);
5216 keep_going (ecs);
5217 return;
5218 }
5219 }
5220
5221 if (ecs->event_thread->control.step_range_end == 1)
5222 {
5223 /* It is stepi or nexti. We always want to stop stepping after
5224 one instruction. */
5225 if (debug_infrun)
5226 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5227 end_stepping_range (ecs);
5228 return;
5229 }
5230
5231 if (stop_pc_sal.line == 0)
5232 {
5233 /* We have no line number information. That means to stop
5234 stepping (does this always happen right after one instruction,
5235 when we do "s" in a function with no line numbers,
5236 or can this happen as a result of a return or longjmp?). */
5237 if (debug_infrun)
5238 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5239 end_stepping_range (ecs);
5240 return;
5241 }
5242
5243 /* Look for "calls" to inlined functions, part one. If the inline
5244 frame machinery detected some skipped call sites, we have entered
5245 a new inline function. */
5246
5247 if (frame_id_eq (get_frame_id (get_current_frame ()),
5248 ecs->event_thread->control.step_frame_id)
5249 && inline_skipped_frames (ecs->ptid))
5250 {
5251 struct symtab_and_line call_sal;
5252
5253 if (debug_infrun)
5254 fprintf_unfiltered (gdb_stdlog,
5255 "infrun: stepped into inlined function\n");
5256
5257 find_frame_sal (get_current_frame (), &call_sal);
5258
5259 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5260 {
5261 /* For "step", we're going to stop. But if the call site
5262 for this inlined function is on the same source line as
5263 we were previously stepping, go down into the function
5264 first. Otherwise stop at the call site. */
5265
5266 if (call_sal.line == ecs->event_thread->current_line
5267 && call_sal.symtab == ecs->event_thread->current_symtab)
5268 step_into_inline_frame (ecs->ptid);
5269
5270 end_stepping_range (ecs);
5271 return;
5272 }
5273 else
5274 {
5275 /* For "next", we should stop at the call site if it is on a
5276 different source line. Otherwise continue through the
5277 inlined function. */
5278 if (call_sal.line == ecs->event_thread->current_line
5279 && call_sal.symtab == ecs->event_thread->current_symtab)
5280 keep_going (ecs);
5281 else
5282 end_stepping_range (ecs);
5283 return;
5284 }
5285 }
5286
5287 /* Look for "calls" to inlined functions, part two. If we are still
5288 in the same real function we were stepping through, but we have
5289 to go further up to find the exact frame ID, we are stepping
5290 through a more inlined call beyond its call site. */
5291
5292 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5293 && !frame_id_eq (get_frame_id (get_current_frame ()),
5294 ecs->event_thread->control.step_frame_id)
5295 && stepped_in_from (get_current_frame (),
5296 ecs->event_thread->control.step_frame_id))
5297 {
5298 if (debug_infrun)
5299 fprintf_unfiltered (gdb_stdlog,
5300 "infrun: stepping through inlined function\n");
5301
5302 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5303 keep_going (ecs);
5304 else
5305 end_stepping_range (ecs);
5306 return;
5307 }
5308
5309 if ((stop_pc == stop_pc_sal.pc)
5310 && (ecs->event_thread->current_line != stop_pc_sal.line
5311 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5312 {
5313 /* We are at the start of a different line. So stop. Note that
5314 we don't stop if we step into the middle of a different line.
5315 That is said to make things like for (;;) statements work
5316 better. */
5317 if (debug_infrun)
5318 fprintf_unfiltered (gdb_stdlog,
5319 "infrun: stepped to a different line\n");
5320 end_stepping_range (ecs);
5321 return;
5322 }
5323
5324 /* We aren't done stepping.
5325
5326 Optimize by setting the stepping range to the line.
5327 (We might not be in the original line, but if we entered a
5328 new line in mid-statement, we continue stepping. This makes
5329 things like for(;;) statements work better.) */
5330
5331 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5332 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5333 ecs->event_thread->control.may_range_step = 1;
5334 set_step_info (frame, stop_pc_sal);
5335
5336 if (debug_infrun)
5337 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5338 keep_going (ecs);
5339 }
5340
5341 /* In all-stop mode, if we're currently stepping but have stopped in
5342 some other thread, we may need to switch back to the stepped
5343 thread. Returns true if we set the inferior running, false if we left
5344 it stopped (and the event needs further processing). */
5345
5346 static int
5347 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5348 {
5349 if (!non_stop)
5350 {
5351 struct thread_info *tp;
5352 struct thread_info *stepping_thread;
5353 struct thread_info *step_over;
5354
5355 /* If any thread is blocked on some internal breakpoint, and we
5356 simply need to step over that breakpoint to get it going
5357 again, do that first. */
5358
5359 /* However, if we see an event for the stepping thread, then we
5360 know all other threads have been moved past their breakpoints
5361 already. Let the caller check whether the step is finished,
5362 etc., before deciding to move it past a breakpoint. */
5363 if (ecs->event_thread->control.step_range_end != 0)
5364 return 0;
5365
5366 /* Check if the current thread is blocked on an incomplete
5367 step-over, interrupted by a random signal. */
5368 if (ecs->event_thread->control.trap_expected
5369 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5370 {
5371 if (debug_infrun)
5372 {
5373 fprintf_unfiltered (gdb_stdlog,
5374 "infrun: need to finish step-over of [%s]\n",
5375 target_pid_to_str (ecs->event_thread->ptid));
5376 }
5377 keep_going (ecs);
5378 return 1;
5379 }
5380
5381 /* Check if the current thread is blocked by a single-step
5382 breakpoint of another thread. */
5383 if (ecs->hit_singlestep_breakpoint)
5384 {
5385 if (debug_infrun)
5386 {
5387 fprintf_unfiltered (gdb_stdlog,
5388 "infrun: need to step [%s] over single-step "
5389 "breakpoint\n",
5390 target_pid_to_str (ecs->ptid));
5391 }
5392 keep_going (ecs);
5393 return 1;
5394 }
5395
5396 /* Otherwise, we no longer expect a trap in the current thread.
5397 Clear the trap_expected flag before switching back -- this is
5398 what keep_going does as well, if we call it. */
5399 ecs->event_thread->control.trap_expected = 0;
5400
5401 /* Likewise, clear the signal if it should not be passed. */
5402 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5403 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5404
5405 /* If scheduler locking applies even if not stepping, there's no
5406 need to walk over threads. Above we've checked whether the
5407 current thread is stepping. If some other thread not the
5408 event thread is stepping, then it must be that scheduler
5409 locking is not in effect. */
5410 if (schedlock_applies (0))
5411 return 0;
5412
5413 /* Look for the stepping/nexting thread, and check if any
5414 thread other than the stepping thread needs to start a
5415 step-over. Do all step-overs before actually proceeding with
5416 step/next/etc. */
5417 stepping_thread = NULL;
5418 step_over = NULL;
5419 ALL_NON_EXITED_THREADS (tp)
5420 {
5421 /* Ignore threads of processes we're not resuming. */
5422 if (!sched_multi
5423 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5424 continue;
5425
5426 /* When stepping over a breakpoint, we lock all threads
5427 except the one that needs to move past the breakpoint.
5428 If a non-event thread has this set, the "incomplete
5429 step-over" check above should have caught it earlier. */
5430 gdb_assert (!tp->control.trap_expected);
5431
5432 /* Did we find the stepping thread? */
5433 if (tp->control.step_range_end)
5434 {
5435 /* Yep. There should only be one, though. */
5436 gdb_assert (stepping_thread == NULL);
5437
5438 /* The event thread is handled at the top, before we
5439 enter this loop. */
5440 gdb_assert (tp != ecs->event_thread);
5441
5442 /* If some thread other than the event thread is
5443 stepping, then scheduler locking can't be in effect;
5444 otherwise we wouldn't have resumed the current event
5445 thread in the first place. */
5446 gdb_assert (!schedlock_applies (1));
5447
5448 stepping_thread = tp;
5449 }
5450 else if (thread_still_needs_step_over (tp))
5451 {
5452 step_over = tp;
5453
5454 /* At the top we've returned early if the event thread
5455 is stepping. If some other thread not the event
5456 thread is stepping, then scheduler locking can't be
5457 in effect, and we can resume this thread. No need to
5458 keep looking for the stepping thread then. */
5459 break;
5460 }
5461 }
5462
5463 if (step_over != NULL)
5464 {
5465 tp = step_over;
5466 if (debug_infrun)
5467 {
5468 fprintf_unfiltered (gdb_stdlog,
5469 "infrun: need to step-over [%s]\n",
5470 target_pid_to_str (tp->ptid));
5471 }
5472
5473 /* Only the stepping thread should have this set. */
5474 gdb_assert (tp->control.step_range_end == 0);
5475
5476 ecs->ptid = tp->ptid;
5477 ecs->event_thread = tp;
5478 switch_to_thread (ecs->ptid);
5479 keep_going (ecs);
5480 return 1;
5481 }
5482
5483 if (stepping_thread != NULL)
5484 {
5485 struct frame_info *frame;
5486 struct gdbarch *gdbarch;
5487
5488 tp = stepping_thread;
5489
5490 /* If the stepping thread exited, then don't try to switch
5491 back and resume it, which could fail in several different
5492 ways depending on the target. Instead, just keep going.
5493
5494 We can find a stepping dead thread in the thread list in
5495 two cases:
5496
5497 - The target supports thread exit events, and when the
5498 target tries to delete the thread from the thread list,
5499 inferior_ptid pointed at the exiting thread. In that
5500 case, calling delete_thread does not really remove the
5501 thread from the list; instead, the thread is left listed,
5502 with 'exited' state.
5503
5504 - The target's debug interface does not support thread
5505 exit events, and so we have no idea whatsoever if the
5506 previously stepping thread is still alive. For that
5507 reason, we need to synchronously query the target
5508 now. */
5509 if (is_exited (tp->ptid)
5510 || !target_thread_alive (tp->ptid))
5511 {
5512 if (debug_infrun)
5513 fprintf_unfiltered (gdb_stdlog,
5514 "infrun: not switching back to "
5515 "stepped thread, it has vanished\n");
5516
5517 delete_thread (tp->ptid);
5518 keep_going (ecs);
5519 return 1;
5520 }
5521
5522 if (debug_infrun)
5523 fprintf_unfiltered (gdb_stdlog,
5524 "infrun: switching back to stepped thread\n");
5525
5526 ecs->event_thread = tp;
5527 ecs->ptid = tp->ptid;
5528 context_switch (ecs->ptid);
5529
5530 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5531 frame = get_current_frame ();
5532 gdbarch = get_frame_arch (frame);
5533
5534 /* If the PC of the thread we were trying to single-step has
5535 changed, then that thread has trapped or been signaled,
5536 but the event has not been reported to GDB yet. Re-poll
5537 the target looking for this particular thread's event
5538 (i.e. temporarily enable schedlock) by:
5539
5540 - setting a break at the current PC
5541 - resuming that particular thread, only (by setting
5542 trap expected)
5543
5544 This prevents us from continuously moving the single-step
5545 breakpoint forward, one instruction at a time,
5546 overstepping. */
5547
5548 if (gdbarch_software_single_step_p (gdbarch)
5549 && stop_pc != tp->prev_pc)
5550 {
5551 if (debug_infrun)
5552 fprintf_unfiltered (gdb_stdlog,
5553 "infrun: expected thread advanced also\n");
5554
5555 insert_single_step_breakpoint (get_frame_arch (frame),
5556 get_frame_address_space (frame),
5557 stop_pc);
5558 singlestep_breakpoints_inserted_p = 1;
5559 ecs->event_thread->control.trap_expected = 1;
5560 singlestep_ptid = inferior_ptid;
5561 singlestep_pc = stop_pc;
5562
5563 resume (0, GDB_SIGNAL_0);
5564 prepare_to_wait (ecs);
5565 }
5566 else
5567 {
5568 if (debug_infrun)
5569 fprintf_unfiltered (gdb_stdlog,
5570 "infrun: expected thread still "
5571 "hasn't advanced\n");
5572 keep_going (ecs);
5573 }
5574
5575 return 1;
5576 }
5577 }
5578 return 0;
5579 }
5580
5581 /* Is thread TP in the middle of single-stepping? */
5582
5583 static int
5584 currently_stepping (struct thread_info *tp)
5585 {
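/* Added note: this covers a thread inside an active step/next range
   with no pending step-resume breakpoint, a thread being
   single-stepped past a breakpoint (trap_expected), and the
   bpstat_should_step case (e.g. software watchpoints that require
   single-stepping).  */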
5586 return ((tp->control.step_range_end
5587 && tp->control.step_resume_breakpoint == NULL)
5588 || tp->control.trap_expected
5589 || bpstat_should_step ());
5590 }
5591
5592 /* Inferior has stepped into a subroutine call with source code that
5593 we should not step over. Do step to the first line of code in
5594 it. */
5595
5596 static void
5597 handle_step_into_function (struct gdbarch *gdbarch,
5598 struct execution_control_state *ecs)
5599 {
5600 struct symtab *s;
5601 struct symtab_and_line stop_func_sal, sr_sal;
5602
5603 fill_in_stop_func (gdbarch, ecs);
5604
5605 s = find_pc_symtab (stop_pc);
5606 if (s && s->language != language_asm)
5607 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5608 ecs->stop_func_start);
5609
5610 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5611 /* Use the step_resume_break to step until the end of the prologue,
5612 even if that involves jumps (as it seems to on the vax under
5613 4.2). */
5614 /* If the prologue ends in the middle of a source line, continue to
5615 the end of that source line (if it is still within the function).
5616 Otherwise, just go to end of prologue. */
5617 if (stop_func_sal.end
5618 && stop_func_sal.pc != ecs->stop_func_start
5619 && stop_func_sal.end < ecs->stop_func_end)
5620 ecs->stop_func_start = stop_func_sal.end;
5621
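/* Worked example (added comment, illustrative addresses): if
   skip_prologue reports the prologue ending at 0x1004, and the source
   line containing 0x1004 starts at 0x1000 and ends at 0x1010, with the
   function extending past 0x1010, then stop_func_start is bumped to
   0x1010 above, so we run to the end of that line rather than stopping
   mid-line.  */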
5622 /* Architectures which require breakpoint adjustment might not be able
5623 to place a breakpoint at the computed address. If so, the test
5624 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5625 ecs->stop_func_start to an address at which a breakpoint may be
5626 legitimately placed.
5627
5628 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5629 made, GDB will enter an infinite loop when stepping through
5630 optimized code consisting of VLIW instructions which contain
5631 subinstructions corresponding to different source lines. On
5632 FR-V, it's not permitted to place a breakpoint on any but the
5633 first subinstruction of a VLIW instruction. When a breakpoint is
5634 set, GDB will adjust the breakpoint address to the beginning of
5635 the VLIW instruction. Thus, we need to make the corresponding
5636 adjustment here when computing the stop address. */
5637
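/* Illustrative example (added): on such an architecture, if the
   computed stop_func_start lands on a subinstruction in the middle of
   a VLIW bundle, gdbarch_adjust_breakpoint_address rewinds it to the
   bundle's first subinstruction, so both the later comparison with
   stop_pc and the step-resume breakpoint use a placeable address.  */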
5638 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5639 {
5640 ecs->stop_func_start
5641 = gdbarch_adjust_breakpoint_address (gdbarch,
5642 ecs->stop_func_start);
5643 }
5644
5645 if (ecs->stop_func_start == stop_pc)
5646 {
5647 /* We are already there: stop now. */
5648 end_stepping_range (ecs);
5649 return;
5650 }
5651 else
5652 {
5653 /* Put the step-breakpoint there and go until there. */
5654 init_sal (&sr_sal); /* initialize to zeroes */
5655 sr_sal.pc = ecs->stop_func_start;
5656 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5657 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5658
5659 /* Do not specify what the fp should be when we stop since on
5660 some machines the prologue is where the new fp value is
5661 established. */
5662 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5663
5664 /* And make sure stepping stops right away then. */
5665 ecs->event_thread->control.step_range_end
5666 = ecs->event_thread->control.step_range_start;
5667 }
5668 keep_going (ecs);
5669 }
5670
5671 /* Inferior has stepped backward into a subroutine call with source
5672 code that we should not step over. Do step to the beginning of the
5673 last line of code in it. */
5674
5675 static void
5676 handle_step_into_function_backward (struct gdbarch *gdbarch,
5677 struct execution_control_state *ecs)
5678 {
5679 struct symtab *s;
5680 struct symtab_and_line stop_func_sal;
5681
5682 fill_in_stop_func (gdbarch, ecs);
5683
5684 s = find_pc_symtab (stop_pc);
5685 if (s && s->language != language_asm)
5686 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5687 ecs->stop_func_start);
5688
5689 stop_func_sal = find_pc_line (stop_pc, 0);
5690
5691 /* OK, we're just going to keep stepping here. */
5692 if (stop_func_sal.pc == stop_pc)
5693 {
5694 /* We're there already. Just stop stepping now. */
5695 end_stepping_range (ecs);
5696 }
5697 else
5698 {
5699 /* Else just reset the step range and keep going.
5700 No step-resume breakpoint; they don't work for
5701 epilogues, which can have multiple entry paths. */
5702 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5703 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5704 keep_going (ecs);
5705 }
5706 return;
5707 }
5708
5709 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5710 This is used both to skip over functions and to skip over code. */
5711
5712 static void
5713 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5714 struct symtab_and_line sr_sal,
5715 struct frame_id sr_id,
5716 enum bptype sr_type)
5717 {
5718 /* There should never be more than one step-resume or longjmp-resume
5719 breakpoint per thread, so we should never be setting a new
5720 step_resume_breakpoint when one is already active. */
5721 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5722 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5723
5724 if (debug_infrun)
5725 fprintf_unfiltered (gdb_stdlog,
5726 "infrun: inserting step-resume breakpoint at %s\n",
5727 paddress (gdbarch, sr_sal.pc));
5728
5729 inferior_thread ()->control.step_resume_breakpoint
5730 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5731 }
5732
5733 void
5734 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5735 struct symtab_and_line sr_sal,
5736 struct frame_id sr_id)
5737 {
5738 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5739 sr_sal, sr_id,
5740 bp_step_resume);
5741 }
5742
5743 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5744 This is used to skip a potential signal handler.
5745
5746 This is called with the interrupted function's frame. The signal
5747 handler, when it returns, will resume the interrupted function at
5748 RETURN_FRAME.pc. */
5749
5750 static void
5751 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5752 {
5753 struct symtab_and_line sr_sal;
5754 struct gdbarch *gdbarch;
5755
5756 gdb_assert (return_frame != NULL);
5757 init_sal (&sr_sal); /* initialize to zeros */
5758
5759 gdbarch = get_frame_arch (return_frame);
5760 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5761 sr_sal.section = find_pc_overlay (sr_sal.pc);
5762 sr_sal.pspace = get_frame_program_space (return_frame);
5763
5764 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5765 get_stack_frame_id (return_frame),
5766 bp_hp_step_resume);
5767 }
5768
5769 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5770 is used to skip a function after stepping into it (for "next" or if
5771 the called function has no debugging information).
5772
5773 The current function has almost always been reached by single
5774 stepping a call or return instruction. NEXT_FRAME belongs to the
5775 current function, and the breakpoint will be set at the caller's
5776 resume address.
5777
5778 This is a separate function rather than reusing
5779 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5780 get_prev_frame, which may stop prematurely (see the implementation
5781 of frame_unwind_caller_id for an example). */
5782
5783 static void
5784 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5785 {
5786 struct symtab_and_line sr_sal;
5787 struct gdbarch *gdbarch;
5788
5789 /* We shouldn't have gotten here if we don't know where the call site
5790 is. */
5791 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5792
5793 init_sal (&sr_sal); /* initialize to zeros */
5794
5795 gdbarch = frame_unwind_caller_arch (next_frame);
5796 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5797 frame_unwind_caller_pc (next_frame));
5798 sr_sal.section = find_pc_overlay (sr_sal.pc);
5799 sr_sal.pspace = frame_unwind_program_space (next_frame);
5800
5801 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5802 frame_unwind_caller_id (next_frame));
5803 }
5804
5805 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5806 new breakpoint at the target of a jmp_buf. The handling of
5807 longjmp-resume uses the same mechanisms used for handling
5808 "step-resume" breakpoints. */
5809
5810 static void
5811 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5812 {
5813 /* There should never be more than one longjmp-resume breakpoint per
5814 thread, so we should never be setting a new
5815 longjmp_resume_breakpoint when one is already active. */
5816 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5817
5818 if (debug_infrun)
5819 fprintf_unfiltered (gdb_stdlog,
5820 "infrun: inserting longjmp-resume breakpoint at %s\n",
5821 paddress (gdbarch, pc));
5822
5823 inferior_thread ()->control.exception_resume_breakpoint =
5824 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5825 }
5826
5827 /* Insert an exception resume breakpoint. TP is the thread throwing
5828 the exception. The block B is the block of the unwinder debug hook
5829 function. FRAME is the frame corresponding to the call to this
5830 function. SYM is the symbol of the function argument holding the
5831 target PC of the exception. */
5832
5833 static void
5834 insert_exception_resume_breakpoint (struct thread_info *tp,
5835 const struct block *b,
5836 struct frame_info *frame,
5837 struct symbol *sym)
5838 {
5839 volatile struct gdb_exception e;
5840
5841 /* We want to ignore errors here. */
5842 TRY_CATCH (e, RETURN_MASK_ERROR)
5843 {
5844 struct symbol *vsym;
5845 struct value *value;
5846 CORE_ADDR handler;
5847 struct breakpoint *bp;
5848
5849 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5850 value = read_var_value (vsym, frame);
5851 /* If the value was optimized out, revert to the old behavior. */
5852 if (! value_optimized_out (value))
5853 {
5854 handler = value_as_address (value);
5855
5856 if (debug_infrun)
5857 fprintf_unfiltered (gdb_stdlog,
5858 "infrun: exception resume at %lx\n",
5859 (unsigned long) handler);
5860
5861 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5862 handler, bp_exception_resume);
5863
5864 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5865 frame = NULL;
5866
5867 bp->thread = tp->num;
5868 inferior_thread ()->control.exception_resume_breakpoint = bp;
5869 }
5870 }
5871 }
5872
5873 /* A helper for check_exception_resume that sets an
5874 exception-breakpoint based on a SystemTap probe. */
5875
5876 static void
5877 insert_exception_resume_from_probe (struct thread_info *tp,
5878 const struct bound_probe *probe,
5879 struct frame_info *frame)
5880 {
5881 struct value *arg_value;
5882 CORE_ADDR handler;
5883 struct breakpoint *bp;
5884
5885 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5886 if (!arg_value)
5887 return;
5888
5889 handler = value_as_address (arg_value);
5890
5891 if (debug_infrun)
5892 fprintf_unfiltered (gdb_stdlog,
5893 "infrun: exception resume at %s\n",
5894 paddress (get_objfile_arch (probe->objfile),
5895 handler));
5896
5897 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5898 handler, bp_exception_resume);
5899 bp->thread = tp->num;
5900 inferior_thread ()->control.exception_resume_breakpoint = bp;
5901 }
5902
5903 /* This is called when an exception has been intercepted. Check to
5904 see whether the exception's destination is of interest, and if so,
5905 set an exception resume breakpoint there. */
5906
5907 static void
5908 check_exception_resume (struct execution_control_state *ecs,
5909 struct frame_info *frame)
5910 {
5911 volatile struct gdb_exception e;
5912 struct bound_probe probe;
5913 struct symbol *func;
5914
5915 /* First see if this exception unwinding breakpoint was set via a
5916 SystemTap probe point. If so, the probe has two arguments: the
5917 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5918 set a breakpoint there. */
5919 probe = find_probe_by_pc (get_frame_pc (frame));
5920 if (probe.probe)
5921 {
5922 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5923 return;
5924 }
5925
5926 func = get_frame_function (frame);
5927 if (!func)
5928 return;
5929
5930 TRY_CATCH (e, RETURN_MASK_ERROR)
5931 {
5932 const struct block *b;
5933 struct block_iterator iter;
5934 struct symbol *sym;
5935 int argno = 0;
5936
5937 /* The exception breakpoint is a thread-specific breakpoint on
5938 the unwinder's debug hook, declared as:
5939
5940 void _Unwind_DebugHook (void *cfa, void *handler);
5941
5942 The CFA argument indicates the frame to which control is
5943 about to be transferred. HANDLER is the destination PC.
5944
5945 We ignore the CFA and set a temporary breakpoint at HANDLER.
5946 This is not extremely efficient but it avoids issues in gdb
5947 with computing the DWARF CFA, and it also works even in weird
5948 cases such as throwing an exception from inside a signal
5949 handler. */
5950
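/* Added descriptive note: the loop below walks the hook's formal
   arguments, skipping the first one (CFA) via the argno counter and
   passing the second one (HANDLER) to
   insert_exception_resume_breakpoint.  */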
5951 b = SYMBOL_BLOCK_VALUE (func);
5952 ALL_BLOCK_SYMBOLS (b, iter, sym)
5953 {
5954 if (!SYMBOL_IS_ARGUMENT (sym))
5955 continue;
5956
5957 if (argno == 0)
5958 ++argno;
5959 else
5960 {
5961 insert_exception_resume_breakpoint (ecs->event_thread,
5962 b, frame, sym);
5963 break;
5964 }
5965 }
5966 }
5967 }
5968
5969 static void
5970 stop_waiting (struct execution_control_state *ecs)
5971 {
5972 if (debug_infrun)
5973 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
5974
5975 clear_step_over_info ();
5976
5977 /* Let callers know we don't want to wait for the inferior anymore. */
5978 ecs->wait_some_more = 0;
5979 }
5980
5981 /* Called when we should continue running the inferior, because the
5982 current event doesn't cause a user visible stop. This does the
5983 resuming part; waiting for the next event is done elsewhere. */
5984
5985 static void
5986 keep_going (struct execution_control_state *ecs)
5987 {
5988 /* Make sure normal_stop is called if we get a QUIT handled before
5989 reaching resume. */
5990 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5991
5992 /* Save the pc before execution, to compare with pc after stop. */
5993 ecs->event_thread->prev_pc
5994 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5995
5996 if (ecs->event_thread->control.trap_expected
5997 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5998 {
5999 /* We haven't yet gotten our trap, and either: intercepted a
6000 non-signal event (e.g., a fork); or took a signal which we
6001 are supposed to pass through to the inferior. Simply
6002 continue. */
6003 discard_cleanups (old_cleanups);
6004 resume (currently_stepping (ecs->event_thread),
6005 ecs->event_thread->suspend.stop_signal);
6006 }
6007 else
6008 {
6009 volatile struct gdb_exception e;
6010 struct regcache *regcache = get_current_regcache ();
6011
6012 /* Either the trap was not expected, but we are continuing
6013 anyway (if we got a signal, the user asked it be passed to
6014 the child)
6015 -- or --
6016 We got our expected trap, but decided we should resume from
6017 it.
6018
6019 We're going to run this baby now!
6020
6021 Note that insert_breakpoints won't try to re-insert
6022 already inserted breakpoints. Therefore, we don't
6023 care if breakpoints were already inserted, or not. */
6024
6025 /* If we need to step over a breakpoint, and we're not using
6026 displaced stepping to do so, insert all breakpoints
6027 (watchpoints, etc.) but the one we're stepping over, step one
6028 instruction, and then re-insert the breakpoint when that step
6029 is finished. */
6030 if ((ecs->hit_singlestep_breakpoint
6031 || thread_still_needs_step_over (ecs->event_thread))
6032 && !use_displaced_stepping (get_regcache_arch (regcache)))
6033 {
6034 set_step_over_info (get_regcache_aspace (regcache),
6035 regcache_read_pc (regcache));
6036 }
6037 else
6038 clear_step_over_info ();
6039
6040 /* Stop stepping if inserting breakpoints fails. */
6041 TRY_CATCH (e, RETURN_MASK_ERROR)
6042 {
6043 insert_breakpoints ();
6044 }
6045 if (e.reason < 0)
6046 {
6047 exception_print (gdb_stderr, e);
6048 stop_waiting (ecs);
6049 return;
6050 }
6051
6052 ecs->event_thread->control.trap_expected
6053 = (ecs->event_thread->stepping_over_breakpoint
6054 || ecs->hit_singlestep_breakpoint);
6055
6056 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6057 explicitly specifies that such a signal should be delivered
6058 to the target program). Typically, that would occur when a
6059 user is debugging a target monitor on a simulator: the target
6060 monitor sets a breakpoint; the simulator encounters this
6061 breakpoint and halts the simulation handing control to GDB;
6062 GDB, noting that the stop address doesn't map to any known
6063 breakpoint, returns control back to the simulator; the
6064 simulator then delivers the hardware equivalent of a
6065 GDB_SIGNAL_TRAP to the program being debugged. */
6066 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6067 && !signal_program[ecs->event_thread->suspend.stop_signal])
6068 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6069
6070 discard_cleanups (old_cleanups);
6071 resume (currently_stepping (ecs->event_thread),
6072 ecs->event_thread->suspend.stop_signal);
6073 }
6074
6075 prepare_to_wait (ecs);
6076 }
6077
6078 /* This function normally comes after a resume, before
6079 handle_inferior_event exits. It takes care of any last bits of
6080 housekeeping, and sets the all-important wait_some_more flag. */
6081
6082 static void
6083 prepare_to_wait (struct execution_control_state *ecs)
6084 {
6085 if (debug_infrun)
6086 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6087
6088 /* This is the old end of the while loop. Let everybody know we
6089 want to wait for the inferior some more and get called again
6090 soon. */
6091 ecs->wait_some_more = 1;
6092 }
6093
6094 /* We are done with the step range of a step/next/si/ni command.
6095 Called once for each n of a "step n" operation. Notify observers
6096 if not in the middle of doing a "step N" operation for N > 1. */
6097
6098 static void
6099 end_stepping_range (struct execution_control_state *ecs)
6100 {
6101 ecs->event_thread->control.stop_step = 1;
6102 if (!ecs->event_thread->step_multi)
6103 observer_notify_end_stepping_range ();
6104 stop_waiting (ecs);
6105 }
6106
6107 /* Several print_*_reason functions to print why the inferior has stopped.
6108 We always print something when the inferior exits, or receives a signal.
6109 The rest of the cases are dealt with later on in normal_stop and
6110 print_it_typical. Ideally there should be a call to one of these
6111 print_*_reason functions from handle_inferior_event each time
6112 stop_waiting is called.
6113
6114 Note that we don't call these directly, instead we delegate that to
6115 the interpreters, through observers. Interpreters then call these
6116 with whatever uiout is right. */
6117
6118 void
6119 print_end_stepping_range_reason (struct ui_out *uiout)
6120 {
6121 /* For CLI-like interpreters, print nothing. */
6122
6123 if (ui_out_is_mi_like_p (uiout))
6124 {
6125 ui_out_field_string (uiout, "reason",
6126 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6127 }
6128 }
6129
6130 void
6131 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6132 {
6133 annotate_signalled ();
6134 if (ui_out_is_mi_like_p (uiout))
6135 ui_out_field_string
6136 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6137 ui_out_text (uiout, "\nProgram terminated with signal ");
6138 annotate_signal_name ();
6139 ui_out_field_string (uiout, "signal-name",
6140 gdb_signal_to_name (siggnal));
6141 annotate_signal_name_end ();
6142 ui_out_text (uiout, ", ");
6143 annotate_signal_string ();
6144 ui_out_field_string (uiout, "signal-meaning",
6145 gdb_signal_to_string (siggnal));
6146 annotate_signal_string_end ();
6147 ui_out_text (uiout, ".\n");
6148 ui_out_text (uiout, "The program no longer exists.\n");
6149 }
6150
6151 void
6152 print_exited_reason (struct ui_out *uiout, int exitstatus)
6153 {
6154 struct inferior *inf = current_inferior ();
6155 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6156
6157 annotate_exited (exitstatus);
6158 if (exitstatus)
6159 {
6160 if (ui_out_is_mi_like_p (uiout))
6161 ui_out_field_string (uiout, "reason",
6162 async_reason_lookup (EXEC_ASYNC_EXITED));
6163 ui_out_text (uiout, "[Inferior ");
6164 ui_out_text (uiout, plongest (inf->num));
6165 ui_out_text (uiout, " (");
6166 ui_out_text (uiout, pidstr);
6167 ui_out_text (uiout, ") exited with code ");
6168 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6169 ui_out_text (uiout, "]\n");
6170 }
6171 else
6172 {
6173 if (ui_out_is_mi_like_p (uiout))
6174 ui_out_field_string
6175 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6176 ui_out_text (uiout, "[Inferior ");
6177 ui_out_text (uiout, plongest (inf->num));
6178 ui_out_text (uiout, " (");
6179 ui_out_text (uiout, pidstr);
6180 ui_out_text (uiout, ") exited normally]\n");
6181 }
6182 }
6183
6184 void
6185 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6186 {
6187 annotate_signal ();
6188
6189 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6190 {
6191 struct thread_info *t = inferior_thread ();
6192
6193 ui_out_text (uiout, "\n[");
6194 ui_out_field_string (uiout, "thread-name",
6195 target_pid_to_str (t->ptid));
6196 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6197 ui_out_text (uiout, " stopped");
6198 }
6199 else
6200 {
6201 ui_out_text (uiout, "\nProgram received signal ");
6202 annotate_signal_name ();
6203 if (ui_out_is_mi_like_p (uiout))
6204 ui_out_field_string
6205 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6206 ui_out_field_string (uiout, "signal-name",
6207 gdb_signal_to_name (siggnal));
6208 annotate_signal_name_end ();
6209 ui_out_text (uiout, ", ");
6210 annotate_signal_string ();
6211 ui_out_field_string (uiout, "signal-meaning",
6212 gdb_signal_to_string (siggnal));
6213 annotate_signal_string_end ();
6214 }
6215 ui_out_text (uiout, ".\n");
6216 }
6217
6218 void
6219 print_no_history_reason (struct ui_out *uiout)
6220 {
6221 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6222 }
6223
6224 /* Print current location without a level number, if we have changed
6225 functions or hit a breakpoint. Print source line if we have one.
6226 bpstat_print contains the logic deciding in detail what to print,
6227 based on the event(s) that just occurred. */
6228
6229 void
6230 print_stop_event (struct target_waitstatus *ws)
6231 {
6232 int bpstat_ret;
6233 int source_flag;
6234 int do_frame_printing = 1;
6235 struct thread_info *tp = inferior_thread ();
6236
6237 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6238 switch (bpstat_ret)
6239 {
6240 case PRINT_UNKNOWN:
6241 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6242 should) carry around the function and does (or should) use
6243 that when doing a frame comparison. */
6244 if (tp->control.stop_step
6245 && frame_id_eq (tp->control.step_frame_id,
6246 get_frame_id (get_current_frame ()))
6247 && step_start_function == find_pc_function (stop_pc))
6248 {
6249 /* Finished step, just print source line. */
6250 source_flag = SRC_LINE;
6251 }
6252 else
6253 {
6254 /* Print location and source line. */
6255 source_flag = SRC_AND_LOC;
6256 }
6257 break;
6258 case PRINT_SRC_AND_LOC:
6259 /* Print location and source line. */
6260 source_flag = SRC_AND_LOC;
6261 break;
6262 case PRINT_SRC_ONLY:
6263 source_flag = SRC_LINE;
6264 break;
6265 case PRINT_NOTHING:
6266 /* Something bogus. */
6267 source_flag = SRC_LINE;
6268 do_frame_printing = 0;
6269 break;
6270 default:
6271 internal_error (__FILE__, __LINE__, _("Unknown value."));
6272 }
6273
6274 /* The behavior of this routine with respect to the source
6275 flag is:
6276 SRC_LINE: Print only source line
6277 LOCATION: Print only location
6278 SRC_AND_LOC: Print location and source line. */
6279 if (do_frame_printing)
6280 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6281
6282 /* Display the auto-display expressions. */
6283 do_displays ();
6284 }
6285
6286 /* Here to return control to GDB when the inferior stops for real.
6287 Print appropriate messages, remove breakpoints, give terminal our modes.
6288
6289 STOP_PRINT_FRAME nonzero means print the executing frame
6290 (pc, function, args, file, line number and line text).
6291 BREAKPOINTS_FAILED nonzero means stop was due to error
6292 attempting to insert breakpoints. */
6293
6294 void
6295 normal_stop (void)
6296 {
6297 struct target_waitstatus last;
6298 ptid_t last_ptid;
6299 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6300
6301 get_last_target_status (&last_ptid, &last);
6302
6303 /* If an exception is thrown from this point on, make sure to
6304 propagate GDB's knowledge of the executing state to the
6305 frontend/user running state. A QUIT is an easy exception to see
6306 here, so do this before any filtered output. */
6307 if (!non_stop)
6308 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6309 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6310 && last.kind != TARGET_WAITKIND_EXITED
6311 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6312 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6313
6314 /* As with the notification of thread events, we want to delay
6315 notifying the user that we've switched thread context until
6316 the inferior actually stops.
6317
6318 There's no point in saying anything if the inferior has exited.
6319 Note that SIGNALLED here means "exited with a signal", not
6320 "received a signal".
6321
6322 Also skip saying anything in non-stop mode. In that mode, as we
6323 don't want GDB to switch threads behind the user's back, to avoid
6324 races where the user is typing a command to apply to thread x,
6325 but GDB switches to thread y before the user finishes entering
6326 the command, fetch_inferior_event installs a cleanup to restore
6327 the current thread back to the thread the user had selected right
6328 after this event is handled, so we're not really switching, only
6329 informing of a stop. */
6330 if (!non_stop
6331 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6332 && target_has_execution
6333 && last.kind != TARGET_WAITKIND_SIGNALLED
6334 && last.kind != TARGET_WAITKIND_EXITED
6335 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6336 {
6337 target_terminal_ours_for_output ();
6338 printf_filtered (_("[Switching to %s]\n"),
6339 target_pid_to_str (inferior_ptid));
6340 annotate_thread_changed ();
6341 previous_inferior_ptid = inferior_ptid;
6342 }
6343
6344 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6345 {
6346 gdb_assert (sync_execution || !target_can_async_p ());
6347
6348 target_terminal_ours_for_output ();
6349 printf_filtered (_("No unwaited-for children left.\n"));
6350 }
6351
6352 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6353 {
6354 if (remove_breakpoints ())
6355 {
6356 target_terminal_ours_for_output ();
6357 printf_filtered (_("Cannot remove breakpoints because "
6358 "program is no longer writable.\nFurther "
6359 "execution is probably impossible.\n"));
6360 }
6361 }
6362
6363 /* If an auto-display called a function and that got a signal,
6364 delete that auto-display to avoid an infinite recursion. */
6365
6366 if (stopped_by_random_signal)
6367 disable_current_display ();
6368
6369 /* Don't print a message if in the middle of doing a "step n"
6370 operation for n > 1. */
6371 if (target_has_execution
6372 && last.kind != TARGET_WAITKIND_SIGNALLED
6373 && last.kind != TARGET_WAITKIND_EXITED
6374 && inferior_thread ()->step_multi
6375 && inferior_thread ()->control.stop_step)
6376 goto done;
6377
6378 target_terminal_ours ();
6379 async_enable_stdin ();
6380
6381 /* Set the current source location. This will also happen if we
6382 display the frame below, but the current SAL will be incorrect
6383 during a user hook-stop function. */
6384 if (has_stack_frames () && !stop_stack_dummy)
6385 set_current_sal_from_frame (get_current_frame ());
6386
6387 /* Let the user/frontend see the threads as stopped, but do nothing
6388 if the thread was running an infcall. We may be e.g., evaluating
6389 a breakpoint condition. In that case, the thread had state
6390 THREAD_RUNNING before the infcall, and shall remain set to
6391 running, all without informing the user/frontend about state
6392 transition changes. If this is actually a call command, then the
6393 thread was originally already stopped, so there's no state to
6394 finish either. */
6395 if (target_has_execution && inferior_thread ()->control.in_infcall)
6396 discard_cleanups (old_chain);
6397 else
6398 do_cleanups (old_chain);
6399
6400 /* Look up the hook_stop and run it (CLI internally handles problem
6401 of stop_command's pre-hook not existing). */
6402 if (stop_command)
6403 catch_errors (hook_stop_stub, stop_command,
6404 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6405
6406 if (!has_stack_frames ())
6407 goto done;
6408
6409 if (last.kind == TARGET_WAITKIND_SIGNALLED
6410 || last.kind == TARGET_WAITKIND_EXITED)
6411 goto done;
6412
6413 /* Select innermost stack frame - i.e., current frame is frame 0,
6414 and current location is based on that.
6415 Don't do this on return from a stack dummy routine,
6416 or if the program has exited. */
6417
6418 if (!stop_stack_dummy)
6419 {
6420 select_frame (get_current_frame ());
6421
6422 /* If --batch-silent is enabled then there's no need to print the current
6423 source location, and trying to do so risks causing an error message about
6424 missing source files. */
6425 if (stop_print_frame && !batch_silent)
6426 print_stop_event (&last);
6427 }
6428
6429 /* Save the function value return registers, if we care.
6430 We might be about to restore their previous contents. */
6431 if (inferior_thread ()->control.proceed_to_finish
6432 && execution_direction != EXEC_REVERSE)
6433 {
6434 /* This should not be necessary. */
6435 if (stop_registers)
6436 regcache_xfree (stop_registers);
6437
6438 /* NB: The copy goes through to the target picking up the value of
6439 all the registers. */
6440 stop_registers = regcache_dup (get_current_regcache ());
6441 }
6442
6443 if (stop_stack_dummy == STOP_STACK_DUMMY)
6444 {
6445 /* Pop the empty frame that contains the stack dummy.
6446 This also restores inferior state prior to the call
6447 (struct infcall_suspend_state). */
6448 struct frame_info *frame = get_current_frame ();
6449
6450 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6451 frame_pop (frame);
6452 /* frame_pop() calls reinit_frame_cache as the last thing it
6453 does which means there's currently no selected frame. We
6454 don't need to re-establish a selected frame if the dummy call
6455 returns normally, that will be done by
6456 restore_infcall_control_state. However, we do have to handle
6457 the case where the dummy call is returning after being
6458 stopped (e.g. the dummy call previously hit a breakpoint).
6459 We can't know which case we have so just always re-establish
6460 a selected frame here. */
6461 select_frame (get_current_frame ());
6462 }
6463
6464 done:
6465 annotate_stopped ();
6466
6467 /* Suppress the stop observer if we're in the middle of:
6468
6469 - a step n (n > 1), as there are still more steps to be done.
6470
6471 - a "finish" command, as the observer will be called in
6472 finish_command_continuation, so it can include the inferior
6473 function's return value.
6474
6475 - calling an inferior function, as we pretend the inferior didn't
6476 run at all. The return value of the call is handled by the
6477 expression evaluator, through call_function_by_hand. */
6478
6479 if (!target_has_execution
6480 || last.kind == TARGET_WAITKIND_SIGNALLED
6481 || last.kind == TARGET_WAITKIND_EXITED
6482 || last.kind == TARGET_WAITKIND_NO_RESUMED
6483 || (!(inferior_thread ()->step_multi
6484 && inferior_thread ()->control.stop_step)
6485 && !(inferior_thread ()->control.stop_bpstat
6486 && inferior_thread ()->control.proceed_to_finish)
6487 && !inferior_thread ()->control.in_infcall))
6488 {
6489 if (!ptid_equal (inferior_ptid, null_ptid))
6490 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6491 stop_print_frame);
6492 else
6493 observer_notify_normal_stop (NULL, stop_print_frame);
6494 }
6495
6496 if (target_has_execution)
6497 {
6498 if (last.kind != TARGET_WAITKIND_SIGNALLED
6499 && last.kind != TARGET_WAITKIND_EXITED)
6500 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6501 Delete any breakpoint that is to be deleted at the next stop. */
6502 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6503 }
6504
6505 /* Try to get rid of automatically added inferiors that are no
6506      longer needed.  Keeping those around slows things down linearly.
6507 Note that this never removes the current inferior. */
6508 prune_inferiors ();
6509 }
6510
6511 static int
6512 hook_stop_stub (void *cmd)
6513 {
6514 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6515 return (0);
6516 }
6517 \f
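/* Accessors for the per-signal tables maintained by the "handle"
   command: whether GDB stops, prints a message, or passes SIGNO on
   to the program when the inferior receives that signal.  */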
6518 int
6519 signal_stop_state (int signo)
6520 {
6521 return signal_stop[signo];
6522 }
6523
6524 int
6525 signal_print_state (int signo)
6526 {
6527 return signal_print[signo];
6528 }
6529
6530 int
6531 signal_pass_state (int signo)
6532 {
6533 return signal_program[signo];
6534 }
6535
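/* Recompute the cached entry of signal_pass for SIGNO, or for all
   signals when SIGNO is -1.  A signal may be delivered straight to
   the inferior, without GDB intervening, only if it neither stops
   nor prints, is passed to the program, and is not caught by a
   catchpoint.  */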
6536 static void
6537 signal_cache_update (int signo)
6538 {
6539 if (signo == -1)
6540 {
6541 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6542 signal_cache_update (signo);
6543
6544 return;
6545 }
6546
6547 signal_pass[signo] = (signal_stop[signo] == 0
6548 && signal_print[signo] == 0
6549 && signal_program[signo] == 1
6550 && signal_catch[signo] == 0);
6551 }
6552
6553 int
6554 signal_stop_update (int signo, int state)
6555 {
6556 int ret = signal_stop[signo];
6557
6558 signal_stop[signo] = state;
6559 signal_cache_update (signo);
6560 return ret;
6561 }
6562
6563 int
6564 signal_print_update (int signo, int state)
6565 {
6566 int ret = signal_print[signo];
6567
6568 signal_print[signo] = state;
6569 signal_cache_update (signo);
6570 return ret;
6571 }
6572
6573 int
6574 signal_pass_update (int signo, int state)
6575 {
6576 int ret = signal_program[signo];
6577
6578 signal_program[signo] = state;
6579 signal_cache_update (signo);
6580 return ret;
6581 }
6582
6583 /* Update the global 'signal_catch' from INFO and notify the
6584 target. */
6585
6586 void
6587 signal_catch_update (const unsigned int *info)
6588 {
6589 int i;
6590
6591 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6592 signal_catch[i] = info[i] > 0;
6593 signal_cache_update (-1);
6594 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6595 }
6596
6597 static void
6598 sig_print_header (void)
6599 {
6600 printf_filtered (_("Signal Stop\tPrint\tPass "
6601 "to program\tDescription\n"));
6602 }
6603
6604 static void
6605 sig_print_info (enum gdb_signal oursig)
6606 {
6607 const char *name = gdb_signal_to_name (oursig);
6608 int name_padding = 13 - strlen (name);
6609
6610 if (name_padding <= 0)
6611 name_padding = 0;
6612
6613 printf_filtered ("%s", name);
6614 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6615 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6616 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6617 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6618 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6619 }
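/* For instance, with the defaults set up in _initialize_infrun below,
   the entry for SIGINT comes out roughly as (illustrative spacing):

     SIGINT        Yes     Yes     No              Interrupt  */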
6620
6621 /* Specify how various signals in the inferior should be handled. */
6622
6623 static void
6624 handle_command (char *args, int from_tty)
6625 {
6626 char **argv;
6627 int digits, wordlen;
6628 int sigfirst, signum, siglast;
6629 enum gdb_signal oursig;
6630 int allsigs;
6631 int nsigs;
6632 unsigned char *sigs;
6633 struct cleanup *old_chain;
6634
6635 if (args == NULL)
6636 {
6637 error_no_arg (_("signal to handle"));
6638 }
6639
6640 /* Allocate and zero an array of flags for which signals to handle. */
6641
6642 nsigs = (int) GDB_SIGNAL_LAST;
6643 sigs = (unsigned char *) alloca (nsigs);
6644 memset (sigs, 0, nsigs);
6645
6646 /* Break the command line up into args. */
6647
6648 argv = gdb_buildargv (args);
6649 old_chain = make_cleanup_freeargv (argv);
6650
6651 /* Walk through the args, looking for signal oursigs, signal names, and
6652 actions. Signal numbers and signal names may be interspersed with
6653 actions, with the actions being performed for all signals cumulatively
6654 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
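  /* A few illustrative invocations (not taken from this file):

       handle SIGUSR1 nostop noprint pass
       handle 5-9 stop print

     Names, numbers, ranges and actions may be mixed freely.  */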
6655
6656 while (*argv != NULL)
6657 {
6658 wordlen = strlen (*argv);
6659 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6660 {;
6661 }
6662 allsigs = 0;
6663 sigfirst = siglast = -1;
6664
6665 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6666 {
6667 /* Apply action to all signals except those used by the
6668 debugger. Silently skip those. */
6669 allsigs = 1;
6670 sigfirst = 0;
6671 siglast = nsigs - 1;
6672 }
6673 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6674 {
6675 SET_SIGS (nsigs, sigs, signal_stop);
6676 SET_SIGS (nsigs, sigs, signal_print);
6677 }
6678 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6679 {
6680 UNSET_SIGS (nsigs, sigs, signal_program);
6681 }
6682 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6683 {
6684 SET_SIGS (nsigs, sigs, signal_print);
6685 }
6686 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6687 {
6688 SET_SIGS (nsigs, sigs, signal_program);
6689 }
6690 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6691 {
6692 UNSET_SIGS (nsigs, sigs, signal_stop);
6693 }
6694 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6695 {
6696 SET_SIGS (nsigs, sigs, signal_program);
6697 }
6698 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6699 {
6700 UNSET_SIGS (nsigs, sigs, signal_print);
6701 UNSET_SIGS (nsigs, sigs, signal_stop);
6702 }
6703 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6704 {
6705 UNSET_SIGS (nsigs, sigs, signal_program);
6706 }
6707 else if (digits > 0)
6708 {
6709 /* It is numeric. The numeric signal refers to our own
6710 internal signal numbering from target.h, not to host/target
6711 signal number. This is a feature; users really should be
6712 	     using symbolic names anyway, and the common ones like
6713 	     SIGHUP, SIGINT, SIGALRM, etc. work right regardless.  */
6714
6715 sigfirst = siglast = (int)
6716 gdb_signal_from_command (atoi (*argv));
6717 if ((*argv)[digits] == '-')
6718 {
6719 siglast = (int)
6720 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6721 }
6722 if (sigfirst > siglast)
6723 {
6724 	      /* The range was given in reverse order; swap the bounds.  */
6725 signum = sigfirst;
6726 sigfirst = siglast;
6727 siglast = signum;
6728 }
6729 }
6730 else
6731 {
6732 oursig = gdb_signal_from_name (*argv);
6733 if (oursig != GDB_SIGNAL_UNKNOWN)
6734 {
6735 sigfirst = siglast = (int) oursig;
6736 }
6737 else
6738 {
6739 /* Not a number and not a recognized flag word => complain. */
6740 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6741 }
6742 }
6743
6744 /* If any signal numbers or symbol names were found, set flags for
6745 which signals to apply actions to. */
6746
6747 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6748 {
6749 switch ((enum gdb_signal) signum)
6750 {
6751 case GDB_SIGNAL_TRAP:
6752 case GDB_SIGNAL_INT:
6753 if (!allsigs && !sigs[signum])
6754 {
6755 if (query (_("%s is used by the debugger.\n\
6756 Are you sure you want to change it? "),
6757 gdb_signal_to_name ((enum gdb_signal) signum)))
6758 {
6759 sigs[signum] = 1;
6760 }
6761 else
6762 {
6763 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6764 gdb_flush (gdb_stdout);
6765 }
6766 }
6767 break;
6768 case GDB_SIGNAL_0:
6769 case GDB_SIGNAL_DEFAULT:
6770 case GDB_SIGNAL_UNKNOWN:
6771 /* Make sure that "all" doesn't print these. */
6772 break;
6773 default:
6774 sigs[signum] = 1;
6775 break;
6776 }
6777 }
6778
6779 argv++;
6780 }
6781
6782 for (signum = 0; signum < nsigs; signum++)
6783 if (sigs[signum])
6784 {
6785 signal_cache_update (-1);
6786 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6787 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6788
6789 if (from_tty)
6790 {
6791 /* Show the results. */
6792 sig_print_header ();
6793 for (; signum < nsigs; signum++)
6794 if (sigs[signum])
6795 sig_print_info (signum);
6796 }
6797
6798 break;
6799 }
6800
6801 do_cleanups (old_chain);
6802 }
6803
6804 /* Complete the "handle" command. */
6805
6806 static VEC (char_ptr) *
6807 handle_completer (struct cmd_list_element *ignore,
6808 const char *text, const char *word)
6809 {
6810 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6811 static const char * const keywords[] =
6812 {
6813 "all",
6814 "stop",
6815 "ignore",
6816 "print",
6817 "pass",
6818 "nostop",
6819 "noignore",
6820 "noprint",
6821 "nopass",
6822 NULL,
6823 };
6824
6825 vec_signals = signal_completer (ignore, text, word);
6826 vec_keywords = complete_on_enum (keywords, word, word);
6827
6828 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6829 VEC_free (char_ptr, vec_signals);
6830 VEC_free (char_ptr, vec_keywords);
6831 return return_val;
6832 }
6833
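/* Implement the XDB-compatible "z" command by rewriting its
   toggle-style arguments into an equivalent "handle" command.  For
   example (illustrative), "z SIGINT s" toggles SIGINT between stop
   and nostop.  */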
6834 static void
6835 xdb_handle_command (char *args, int from_tty)
6836 {
6837 char **argv;
6838 struct cleanup *old_chain;
6839
6840 if (args == NULL)
6841 error_no_arg (_("xdb command"));
6842
6843 /* Break the command line up into args. */
6844
6845 argv = gdb_buildargv (args);
6846 old_chain = make_cleanup_freeargv (argv);
6847 if (argv[1] != (char *) NULL)
6848 {
6849 char *argBuf;
6850 int bufLen;
6851
6852 bufLen = strlen (argv[0]) + 20;
6853 argBuf = (char *) xmalloc (bufLen);
6854 if (argBuf)
6855 {
6856 int validFlag = 1;
6857 enum gdb_signal oursig;
6858
6859 oursig = gdb_signal_from_name (argv[0]);
6860 memset (argBuf, 0, bufLen);
6861 if (strcmp (argv[1], "Q") == 0)
6862 sprintf (argBuf, "%s %s", argv[0], "noprint");
6863 else
6864 {
6865 if (strcmp (argv[1], "s") == 0)
6866 {
6867 if (!signal_stop[oursig])
6868 sprintf (argBuf, "%s %s", argv[0], "stop");
6869 else
6870 sprintf (argBuf, "%s %s", argv[0], "nostop");
6871 }
6872 else if (strcmp (argv[1], "i") == 0)
6873 {
6874 if (!signal_program[oursig])
6875 sprintf (argBuf, "%s %s", argv[0], "pass");
6876 else
6877 sprintf (argBuf, "%s %s", argv[0], "nopass");
6878 }
6879 else if (strcmp (argv[1], "r") == 0)
6880 {
6881 if (!signal_print[oursig])
6882 sprintf (argBuf, "%s %s", argv[0], "print");
6883 else
6884 sprintf (argBuf, "%s %s", argv[0], "noprint");
6885 }
6886 else
6887 validFlag = 0;
6888 }
6889 if (validFlag)
6890 handle_command (argBuf, from_tty);
6891 else
6892 printf_filtered (_("Invalid signal handling flag.\n"));
6893 if (argBuf)
6894 xfree (argBuf);
6895 }
6896 }
6897 do_cleanups (old_chain);
6898 }
6899
6900 enum gdb_signal
6901 gdb_signal_from_command (int num)
6902 {
6903 if (num >= 1 && num <= 15)
6904 return (enum gdb_signal) num;
6905 error (_("Only signals 1-15 are valid as numeric signals.\n\
6906 Use \"info signals\" for a list of symbolic signals."));
6907 }
6908
6909 /* Print current contents of the tables set by the handle command.
6910 It is possible we should just be printing signals actually used
6911 by the current target (but for things to work right when switching
6912 targets, all signals should be in the signal tables). */
6913
6914 static void
6915 signals_info (char *signum_exp, int from_tty)
6916 {
6917 enum gdb_signal oursig;
6918
6919 sig_print_header ();
6920
6921 if (signum_exp)
6922 {
6923 /* First see if this is a symbol name. */
6924 oursig = gdb_signal_from_name (signum_exp);
6925 if (oursig == GDB_SIGNAL_UNKNOWN)
6926 {
6927 /* No, try numeric. */
6928 oursig =
6929 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6930 }
6931 sig_print_info (oursig);
6932 return;
6933 }
6934
6935 printf_filtered ("\n");
6936 /* These ugly casts brought to you by the native VAX compiler. */
6937 for (oursig = GDB_SIGNAL_FIRST;
6938 (int) oursig < (int) GDB_SIGNAL_LAST;
6939 oursig = (enum gdb_signal) ((int) oursig + 1))
6940 {
6941 QUIT;
6942
6943 if (oursig != GDB_SIGNAL_UNKNOWN
6944 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6945 sig_print_info (oursig);
6946 }
6947
6948 printf_filtered (_("\nUse the \"handle\" command "
6949 "to change these tables.\n"));
6950 }
6951
6952 /* Check if it makes sense to read $_siginfo from the current thread
6953 at this point. If not, throw an error. */
6954
6955 static void
6956 validate_siginfo_access (void)
6957 {
6958 /* No current inferior, no siginfo. */
6959 if (ptid_equal (inferior_ptid, null_ptid))
6960 error (_("No thread selected."));
6961
6962 /* Don't try to read from a dead thread. */
6963 if (is_exited (inferior_ptid))
6964 error (_("The current thread has terminated"));
6965
6966 /* ... or from a spinning thread. */
6967 if (is_running (inferior_ptid))
6968 error (_("Selected thread is running."));
6969 }
6970
6971 /* The $_siginfo convenience variable is a bit special. We don't know
6972 for sure the type of the value until we actually have a chance to
6973 fetch the data. The type can change depending on gdbarch, so it is
6974    also dependent on which thread you have selected.  We handle this by:
6975
6976 1. making $_siginfo be an internalvar that creates a new value on
6977 access.
6978
6979 2. making the value of $_siginfo be an lval_computed value. */
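/* As an illustrative session (the fields available depend on the
   target's siginfo type):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr  */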
6980
6981 /* This function implements the lval_computed support for reading a
6982 $_siginfo value. */
6983
6984 static void
6985 siginfo_value_read (struct value *v)
6986 {
6987 LONGEST transferred;
6988
6989 validate_siginfo_access ();
6990
6991 transferred =
6992 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6993 NULL,
6994 value_contents_all_raw (v),
6995 value_offset (v),
6996 TYPE_LENGTH (value_type (v)));
6997
6998 if (transferred != TYPE_LENGTH (value_type (v)))
6999 error (_("Unable to read siginfo"));
7000 }
7001
7002 /* This function implements the lval_computed support for writing a
7003 $_siginfo value. */
7004
7005 static void
7006 siginfo_value_write (struct value *v, struct value *fromval)
7007 {
7008 LONGEST transferred;
7009
7010 validate_siginfo_access ();
7011
7012 transferred = target_write (&current_target,
7013 TARGET_OBJECT_SIGNAL_INFO,
7014 NULL,
7015 value_contents_all_raw (fromval),
7016 value_offset (v),
7017 TYPE_LENGTH (value_type (fromval)));
7018
7019 if (transferred != TYPE_LENGTH (value_type (fromval)))
7020 error (_("Unable to write siginfo"));
7021 }
7022
7023 static const struct lval_funcs siginfo_value_funcs =
7024 {
7025 siginfo_value_read,
7026 siginfo_value_write
7027 };
7028
7029 /* Return a new value with the correct type for the siginfo object of
7030 the current thread using architecture GDBARCH. Return a void value
7031 if there's no object available. */
7032
7033 static struct value *
7034 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7035 void *ignore)
7036 {
7037 if (target_has_stack
7038 && !ptid_equal (inferior_ptid, null_ptid)
7039 && gdbarch_get_siginfo_type_p (gdbarch))
7040 {
7041 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7042
7043 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7044 }
7045
7046 return allocate_value (builtin_type (gdbarch)->builtin_void);
7047 }
7048
7049 \f
7050 /* infcall_suspend_state contains state about the program itself like its
7051 registers and any signal it received when it last stopped.
7052 This state must be restored regardless of how the inferior function call
7053 ends (either successfully, or after it hits a breakpoint or signal)
7054 if the program is to properly continue where it left off. */
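/* A caller making an inferior function call typically brackets the
   call roughly like this (an illustrative sketch, not code from this
   file):

     struct infcall_suspend_state *st = save_infcall_suspend_state ();
     struct cleanup *old = make_cleanup_restore_infcall_suspend_state (st);
     ... run the call ...
     do_cleanups (old);

   Running the cleanup restores the saved registers, stop signal and
   siginfo via restore_infcall_suspend_state.  */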
7055
7056 struct infcall_suspend_state
7057 {
7058 struct thread_suspend_state thread_suspend;
7059 #if 0 /* Currently unused and empty structures are not valid C. */
7060 struct inferior_suspend_state inferior_suspend;
7061 #endif
7062
7063 /* Other fields: */
7064 CORE_ADDR stop_pc;
7065 struct regcache *registers;
7066
7067 /* Format of SIGINFO_DATA or NULL if it is not present. */
7068 struct gdbarch *siginfo_gdbarch;
7069
7070   /* The inferior format depends on SIGINFO_GDBARCH and its length is
7071      TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different gdbarch
7072      the content would be invalid.  */
7073 gdb_byte *siginfo_data;
7074 };
7075
7076 struct infcall_suspend_state *
7077 save_infcall_suspend_state (void)
7078 {
7079 struct infcall_suspend_state *inf_state;
7080 struct thread_info *tp = inferior_thread ();
7081 #if 0
7082 struct inferior *inf = current_inferior ();
7083 #endif
7084 struct regcache *regcache = get_current_regcache ();
7085 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7086 gdb_byte *siginfo_data = NULL;
7087
7088 if (gdbarch_get_siginfo_type_p (gdbarch))
7089 {
7090 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7091 size_t len = TYPE_LENGTH (type);
7092 struct cleanup *back_to;
7093
7094 siginfo_data = xmalloc (len);
7095 back_to = make_cleanup (xfree, siginfo_data);
7096
7097 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7098 siginfo_data, 0, len) == len)
7099 discard_cleanups (back_to);
7100 else
7101 {
7102 /* Errors ignored. */
7103 do_cleanups (back_to);
7104 siginfo_data = NULL;
7105 }
7106 }
7107
7108 inf_state = XCNEW (struct infcall_suspend_state);
7109
7110 if (siginfo_data)
7111 {
7112 inf_state->siginfo_gdbarch = gdbarch;
7113 inf_state->siginfo_data = siginfo_data;
7114 }
7115
7116 inf_state->thread_suspend = tp->suspend;
7117 #if 0 /* Currently unused and empty structures are not valid C. */
7118 inf_state->inferior_suspend = inf->suspend;
7119 #endif
7120
7121 /* run_inferior_call will not use the signal due to its `proceed' call with
7122 GDB_SIGNAL_0 anyway. */
7123 tp->suspend.stop_signal = GDB_SIGNAL_0;
7124
7125 inf_state->stop_pc = stop_pc;
7126
7127 inf_state->registers = regcache_dup (regcache);
7128
7129 return inf_state;
7130 }
7131
7132 /* Restore inferior session state to INF_STATE. */
7133
7134 void
7135 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7136 {
7137 struct thread_info *tp = inferior_thread ();
7138 #if 0
7139 struct inferior *inf = current_inferior ();
7140 #endif
7141 struct regcache *regcache = get_current_regcache ();
7142 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7143
7144 tp->suspend = inf_state->thread_suspend;
7145 #if 0 /* Currently unused and empty structures are not valid C. */
7146 inf->suspend = inf_state->inferior_suspend;
7147 #endif
7148
7149 stop_pc = inf_state->stop_pc;
7150
7151 if (inf_state->siginfo_gdbarch == gdbarch)
7152 {
7153 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7154
7155 /* Errors ignored. */
7156 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7157 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7158 }
7159
7160 /* The inferior can be gone if the user types "print exit(0)"
7161 (and perhaps other times). */
7162 if (target_has_execution)
7163 /* NB: The register write goes through to the target. */
7164 regcache_cpy (regcache, inf_state->registers);
7165
7166 discard_infcall_suspend_state (inf_state);
7167 }
7168
7169 static void
7170 do_restore_infcall_suspend_state_cleanup (void *state)
7171 {
7172 restore_infcall_suspend_state (state);
7173 }
7174
7175 struct cleanup *
7176 make_cleanup_restore_infcall_suspend_state
7177 (struct infcall_suspend_state *inf_state)
7178 {
7179 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7180 }
7181
7182 void
7183 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7184 {
7185 regcache_xfree (inf_state->registers);
7186 xfree (inf_state->siginfo_data);
7187 xfree (inf_state);
7188 }
7189
7190 struct regcache *
7191 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7192 {
7193 return inf_state->registers;
7194 }
7195
7196 /* infcall_control_state contains state regarding gdb's control of the
7197 inferior itself like stepping control. It also contains session state like
7198 the user's currently selected frame. */
7199
7200 struct infcall_control_state
7201 {
7202 struct thread_control_state thread_control;
7203 struct inferior_control_state inferior_control;
7204
7205 /* Other fields: */
7206 enum stop_stack_kind stop_stack_dummy;
7207 int stopped_by_random_signal;
7208 int stop_after_trap;
7209
7210   /* ID of the selected frame when the inferior function call was made.  */
7211 struct frame_id selected_frame_id;
7212 };
7213
7214 /* Save all of the information associated with the inferior<==>gdb
7215 connection. */
7216
7217 struct infcall_control_state *
7218 save_infcall_control_state (void)
7219 {
7220 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7221 struct thread_info *tp = inferior_thread ();
7222 struct inferior *inf = current_inferior ();
7223
7224 inf_status->thread_control = tp->control;
7225 inf_status->inferior_control = inf->control;
7226
7227 tp->control.step_resume_breakpoint = NULL;
7228 tp->control.exception_resume_breakpoint = NULL;
7229
7230 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7231 chain. If caller's caller is walking the chain, they'll be happier if we
7232 hand them back the original chain when restore_infcall_control_state is
7233 called. */
7234 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7235
7236 /* Other fields: */
7237 inf_status->stop_stack_dummy = stop_stack_dummy;
7238 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7239 inf_status->stop_after_trap = stop_after_trap;
7240
7241 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7242
7243 return inf_status;
7244 }
7245
7246 static int
7247 restore_selected_frame (void *args)
7248 {
7249 struct frame_id *fid = (struct frame_id *) args;
7250 struct frame_info *frame;
7251
7252 frame = frame_find_by_id (*fid);
7253
7254   /* frame_find_by_id may fail if the previously selected frame no longer
7255      exists; warn and report failure so the caller picks another frame.  */
7256 if (frame == NULL)
7257 {
7258 warning (_("Unable to restore previously selected frame."));
7259 return 0;
7260 }
7261
7262 select_frame (frame);
7263
7264 return (1);
7265 }
7266
7267 /* Restore inferior session state to INF_STATUS. */
7268
7269 void
7270 restore_infcall_control_state (struct infcall_control_state *inf_status)
7271 {
7272 struct thread_info *tp = inferior_thread ();
7273 struct inferior *inf = current_inferior ();
7274
7275 if (tp->control.step_resume_breakpoint)
7276 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7277
7278 if (tp->control.exception_resume_breakpoint)
7279 tp->control.exception_resume_breakpoint->disposition
7280 = disp_del_at_next_stop;
7281
7282 /* Handle the bpstat_copy of the chain. */
7283 bpstat_clear (&tp->control.stop_bpstat);
7284
7285 tp->control = inf_status->thread_control;
7286 inf->control = inf_status->inferior_control;
7287
7288 /* Other fields: */
7289 stop_stack_dummy = inf_status->stop_stack_dummy;
7290 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7291 stop_after_trap = inf_status->stop_after_trap;
7292
7293 if (target_has_stack)
7294 {
7295 /* The point of catch_errors is that if the stack is clobbered,
7296 walking the stack might encounter a garbage pointer and
7297 error() trying to dereference it. */
7298 if (catch_errors
7299 (restore_selected_frame, &inf_status->selected_frame_id,
7300 "Unable to restore previously selected frame:\n",
7301 RETURN_MASK_ERROR) == 0)
7302 /* Error in restoring the selected frame. Select the innermost
7303 frame. */
7304 select_frame (get_current_frame ());
7305 }
7306
7307 xfree (inf_status);
7308 }
7309
7310 static void
7311 do_restore_infcall_control_state_cleanup (void *sts)
7312 {
7313 restore_infcall_control_state (sts);
7314 }
7315
7316 struct cleanup *
7317 make_cleanup_restore_infcall_control_state
7318 (struct infcall_control_state *inf_status)
7319 {
7320 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7321 }
7322
7323 void
7324 discard_infcall_control_state (struct infcall_control_state *inf_status)
7325 {
7326 if (inf_status->thread_control.step_resume_breakpoint)
7327 inf_status->thread_control.step_resume_breakpoint->disposition
7328 = disp_del_at_next_stop;
7329
7330 if (inf_status->thread_control.exception_resume_breakpoint)
7331 inf_status->thread_control.exception_resume_breakpoint->disposition
7332 = disp_del_at_next_stop;
7333
7334 /* See save_infcall_control_state for info on stop_bpstat. */
7335 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7336
7337 xfree (inf_status);
7338 }
7339 \f
7340 /* restore_inferior_ptid() will be used by the cleanup machinery
7341 to restore the inferior_ptid value saved in a call to
7342 save_inferior_ptid(). */
7343
7344 static void
7345 restore_inferior_ptid (void *arg)
7346 {
7347 ptid_t *saved_ptid_ptr = arg;
7348
7349 inferior_ptid = *saved_ptid_ptr;
7350 xfree (arg);
7351 }
7352
7353 /* Save the value of inferior_ptid so that it may be restored by a
7354 later call to do_cleanups(). Returns the struct cleanup pointer
7355 needed for later doing the cleanup. */
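/* Typical usage (illustrative; `other_ptid' stands for whatever thread
   the caller wants to operate on):

     struct cleanup *old_chain = save_inferior_ptid ();
     inferior_ptid = other_ptid;
     ... do the work ...
     do_cleanups (old_chain);

   Running the cleanup restores the original inferior_ptid and frees
   the saved copy.  */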
7356
7357 struct cleanup *
7358 save_inferior_ptid (void)
7359 {
7360 ptid_t *saved_ptid_ptr;
7361
7362 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7363 *saved_ptid_ptr = inferior_ptid;
7364 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7365 }
7366
7367 /* See infrun.h. */
7368
7369 void
7370 clear_exit_convenience_vars (void)
7371 {
7372 clear_internalvar (lookup_internalvar ("_exitsignal"));
7373 clear_internalvar (lookup_internalvar ("_exitcode"));
7374 }
7375 \f
7376
7377 /* User interface for reverse debugging:
7378 Set exec-direction / show exec-direction commands
7379    (returns error unless the target supports reverse execution).  */
7380
7381 int execution_direction = EXEC_FORWARD;
7382 static const char exec_forward[] = "forward";
7383 static const char exec_reverse[] = "reverse";
7384 static const char *exec_direction = exec_forward;
7385 static const char *const exec_direction_names[] = {
7386 exec_forward,
7387 exec_reverse,
7388 NULL
7389 };
7390
7391 static void
7392 set_exec_direction_func (char *args, int from_tty,
7393 struct cmd_list_element *cmd)
7394 {
7395 if (target_can_execute_reverse)
7396 {
7397 if (!strcmp (exec_direction, exec_forward))
7398 execution_direction = EXEC_FORWARD;
7399 else if (!strcmp (exec_direction, exec_reverse))
7400 execution_direction = EXEC_REVERSE;
7401 }
7402 else
7403 {
7404 exec_direction = exec_forward;
7405 error (_("Target does not support this operation."));
7406 }
7407 }
7408
7409 static void
7410 show_exec_direction_func (struct ui_file *out, int from_tty,
7411 struct cmd_list_element *cmd, const char *value)
7412 {
7413 switch (execution_direction) {
7414 case EXEC_FORWARD:
7415 fprintf_filtered (out, _("Forward.\n"));
7416 break;
7417 case EXEC_REVERSE:
7418 fprintf_filtered (out, _("Reverse.\n"));
7419 break;
7420 default:
7421 internal_error (__FILE__, __LINE__,
7422 _("bogus execution_direction value: %d"),
7423 (int) execution_direction);
7424 }
7425 }
7426
7427 static void
7428 show_schedule_multiple (struct ui_file *file, int from_tty,
7429 struct cmd_list_element *c, const char *value)
7430 {
7431 fprintf_filtered (file, _("Resuming the execution of threads "
7432 "of all processes is %s.\n"), value);
7433 }
7434
7435 /* Implementation of `siginfo' variable. */
7436
7437 static const struct internalvar_funcs siginfo_funcs =
7438 {
7439 siginfo_make_value,
7440 NULL,
7441 NULL
7442 };
7443
7444 void
7445 _initialize_infrun (void)
7446 {
7447 int i;
7448 int numsigs;
7449 struct cmd_list_element *c;
7450
7451 add_info ("signals", signals_info, _("\
7452 What debugger does when program gets various signals.\n\
7453 Specify a signal as argument to print info on that signal only."));
7454 add_info_alias ("handle", "signals", 0);
7455
7456 c = add_com ("handle", class_run, handle_command, _("\
7457 Specify how to handle signals.\n\
7458 Usage: handle SIGNAL [ACTIONS]\n\
7459 Args are signals and actions to apply to those signals.\n\
7460 If no actions are specified, the current settings for the specified signals\n\
7461 will be displayed instead.\n\
7462 \n\
7463 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7464 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7465 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7466 The special arg \"all\" is recognized to mean all signals except those\n\
7467 used by the debugger, typically SIGTRAP and SIGINT.\n\
7468 \n\
7469 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7470 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7471 Stop means reenter debugger if this signal happens (implies print).\n\
7472 Print means print a message if this signal happens.\n\
7473 Pass means let program see this signal; otherwise program doesn't know.\n\
7474 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7475 Pass and Stop may be combined.\n\
7476 \n\
7477 Multiple signals may be specified. Signal numbers and signal names\n\
7478 may be interspersed with actions, with the actions being performed for\n\
7479 all signals cumulatively specified."));
7480 set_cmd_completer (c, handle_completer);
7481
7482 if (xdb_commands)
7483 {
7484 add_com ("lz", class_info, signals_info, _("\
7485 What debugger does when program gets various signals.\n\
7486 Specify a signal as argument to print info on that signal only."));
7487 add_com ("z", class_run, xdb_handle_command, _("\
7488 Specify how to handle a signal.\n\
7489 Args are signals and actions to apply to those signals.\n\
7490 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7491 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7492 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7493 The special arg \"all\" is recognized to mean all signals except those\n\
7494 used by the debugger, typically SIGTRAP and SIGINT.\n\
7495 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7496 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7497 nopass), \"Q\" (noprint)\n\
7498 Stop means reenter debugger if this signal happens (implies print).\n\
7499 Print means print a message if this signal happens.\n\
7500 Pass means let program see this signal; otherwise program doesn't know.\n\
7501 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7502 Pass and Stop may be combined."));
7503 }
7504
7505 if (!dbx_commands)
7506 stop_command = add_cmd ("stop", class_obscure,
7507 not_just_help_class_command, _("\
7508 There is no `stop' command, but you can set a hook on `stop'.\n\
7509 This allows you to set a list of commands to be run each time execution\n\
7510 of the program stops."), &cmdlist);
7511
7512 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7513 Set inferior debugging."), _("\
7514 Show inferior debugging."), _("\
7515 When non-zero, inferior specific debugging is enabled."),
7516 NULL,
7517 show_debug_infrun,
7518 &setdebuglist, &showdebuglist);
7519
7520 add_setshow_boolean_cmd ("displaced", class_maintenance,
7521 &debug_displaced, _("\
7522 Set displaced stepping debugging."), _("\
7523 Show displaced stepping debugging."), _("\
7524 When non-zero, displaced stepping specific debugging is enabled."),
7525 NULL,
7526 show_debug_displaced,
7527 &setdebuglist, &showdebuglist);
7528
7529 add_setshow_boolean_cmd ("non-stop", no_class,
7530 &non_stop_1, _("\
7531 Set whether gdb controls the inferior in non-stop mode."), _("\
7532 Show whether gdb controls the inferior in non-stop mode."), _("\
7533 When debugging a multi-threaded program and this setting is\n\
7534 off (the default, also called all-stop mode), when one thread stops\n\
7535 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7536 all other threads in the program while you interact with the thread of\n\
7537 interest. When you continue or step a thread, you can allow the other\n\
7538 threads to run, or have them remain stopped, but while you inspect any\n\
7539 thread's state, all threads stop.\n\
7540 \n\
7541 In non-stop mode, when one thread stops, other threads can continue\n\
7542 to run freely. You'll be able to step each thread independently,\n\
7543 leave it stopped or free to run as needed."),
7544 set_non_stop,
7545 show_non_stop,
7546 &setlist,
7547 &showlist);
7548
7549 numsigs = (int) GDB_SIGNAL_LAST;
7550 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7551 signal_print = (unsigned char *)
7552 xmalloc (sizeof (signal_print[0]) * numsigs);
7553 signal_program = (unsigned char *)
7554 xmalloc (sizeof (signal_program[0]) * numsigs);
7555 signal_catch = (unsigned char *)
7556 xmalloc (sizeof (signal_catch[0]) * numsigs);
7557 signal_pass = (unsigned char *)
7558 xmalloc (sizeof (signal_pass[0]) * numsigs);
7559 for (i = 0; i < numsigs; i++)
7560 {
7561 signal_stop[i] = 1;
7562 signal_print[i] = 1;
7563 signal_program[i] = 1;
7564 signal_catch[i] = 0;
7565 }
7566
7567 /* Signals caused by debugger's own actions
7568 should not be given to the program afterwards. */
7569 signal_program[GDB_SIGNAL_TRAP] = 0;
7570 signal_program[GDB_SIGNAL_INT] = 0;
7571
7572 /* Signals that are not errors should not normally enter the debugger. */
7573 signal_stop[GDB_SIGNAL_ALRM] = 0;
7574 signal_print[GDB_SIGNAL_ALRM] = 0;
7575 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7576 signal_print[GDB_SIGNAL_VTALRM] = 0;
7577 signal_stop[GDB_SIGNAL_PROF] = 0;
7578 signal_print[GDB_SIGNAL_PROF] = 0;
7579 signal_stop[GDB_SIGNAL_CHLD] = 0;
7580 signal_print[GDB_SIGNAL_CHLD] = 0;
7581 signal_stop[GDB_SIGNAL_IO] = 0;
7582 signal_print[GDB_SIGNAL_IO] = 0;
7583 signal_stop[GDB_SIGNAL_POLL] = 0;
7584 signal_print[GDB_SIGNAL_POLL] = 0;
7585 signal_stop[GDB_SIGNAL_URG] = 0;
7586 signal_print[GDB_SIGNAL_URG] = 0;
7587 signal_stop[GDB_SIGNAL_WINCH] = 0;
7588 signal_print[GDB_SIGNAL_WINCH] = 0;
7589 signal_stop[GDB_SIGNAL_PRIO] = 0;
7590 signal_print[GDB_SIGNAL_PRIO] = 0;
7591
7592 /* These signals are used internally by user-level thread
7593 implementations. (See signal(5) on Solaris.) Like the above
7594 signals, a healthy program receives and handles them as part of
7595 its normal operation. */
7596 signal_stop[GDB_SIGNAL_LWP] = 0;
7597 signal_print[GDB_SIGNAL_LWP] = 0;
7598 signal_stop[GDB_SIGNAL_WAITING] = 0;
7599 signal_print[GDB_SIGNAL_WAITING] = 0;
7600 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7601 signal_print[GDB_SIGNAL_CANCEL] = 0;
7602
7603 /* Update cached state. */
7604 signal_cache_update (-1);
7605
7606 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7607 &stop_on_solib_events, _("\
7608 Set stopping for shared library events."), _("\
7609 Show stopping for shared library events."), _("\
7610 If nonzero, gdb will give control to the user when the dynamic linker\n\
7611 notifies gdb of shared library events. The most common event of interest\n\
7612 to the user would be loading/unloading of a new library."),
7613 set_stop_on_solib_events,
7614 show_stop_on_solib_events,
7615 &setlist, &showlist);
7616
7617 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7618 follow_fork_mode_kind_names,
7619 &follow_fork_mode_string, _("\
7620 Set debugger response to a program call of fork or vfork."), _("\
7621 Show debugger response to a program call of fork or vfork."), _("\
7622 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7623 parent - the original process is debugged after a fork\n\
7624 child - the new process is debugged after a fork\n\
7625 The unfollowed process will continue to run.\n\
7626 By default, the debugger will follow the parent process."),
7627 NULL,
7628 show_follow_fork_mode_string,
7629 &setlist, &showlist);
7630
7631 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7632 follow_exec_mode_names,
7633 &follow_exec_mode_string, _("\
7634 Set debugger response to a program call of exec."), _("\
7635 Show debugger response to a program call of exec."), _("\
7636 An exec call replaces the program image of a process.\n\
7637 \n\
7638 follow-exec-mode can be:\n\
7639 \n\
7640 new - the debugger creates a new inferior and rebinds the process\n\
7641 to this new inferior. The program the process was running before\n\
7642 the exec call can be restarted afterwards by restarting the original\n\
7643 inferior.\n\
7644 \n\
7645 same - the debugger keeps the process bound to the same inferior.\n\
7646 The new executable image replaces the previous executable loaded in\n\
7647 the inferior. Restarting the inferior after the exec call restarts\n\
7648 the executable the process was running after the exec call.\n\
7649 \n\
7650 By default, the debugger will use the same inferior."),
7651 NULL,
7652 show_follow_exec_mode_string,
7653 &setlist, &showlist);
7654
7655 add_setshow_enum_cmd ("scheduler-locking", class_run,
7656 scheduler_enums, &scheduler_mode, _("\
7657 Set mode for locking scheduler during execution."), _("\
7658 Show mode for locking scheduler during execution."), _("\
7659 off == no locking (threads may preempt at any time)\n\
7660 on == full locking (no thread except the current thread may run)\n\
7661 step == scheduler locked during every single-step operation.\n\
7662 In this mode, no other thread may run during a step command.\n\
7663 Other threads may run while stepping over a function call ('next')."),
7664 set_schedlock_func, /* traps on target vector */
7665 show_scheduler_mode,
7666 &setlist, &showlist);
7667
7668 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7669 Set mode for resuming threads of all processes."), _("\
7670 Show mode for resuming threads of all processes."), _("\
7671 When on, execution commands (such as 'continue' or 'next') resume all\n\
7672 threads of all processes. When off (which is the default), execution\n\
7673 commands only resume the threads of the current process. The set of\n\
7674 threads that are resumed is further refined by the scheduler-locking\n\
7675 mode (see help set scheduler-locking)."),
7676 NULL,
7677 show_schedule_multiple,
7678 &setlist, &showlist);
7679
7680 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7681 Set mode of the step operation."), _("\
7682 Show mode of the step operation."), _("\
7683 When set, doing a step over a function without debug line information\n\
7684 will stop at the first instruction of that function. Otherwise, the\n\
7685 function is skipped and the step command stops at a different source line."),
7686 NULL,
7687 show_step_stop_if_no_debug,
7688 &setlist, &showlist);
7689
7690 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7691 &can_use_displaced_stepping, _("\
7692 Set debugger's willingness to use displaced stepping."), _("\
7693 Show debugger's willingness to use displaced stepping."), _("\
7694 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7695 supported by the target architecture. If off, gdb will not use displaced\n\
7696 stepping to step over breakpoints, even if such is supported by the target\n\
7697 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7698 if the target architecture supports it and non-stop mode is active, but will not\n\
7699 use it in all-stop mode (see help set non-stop)."),
7700 NULL,
7701 show_can_use_displaced_stepping,
7702 &setlist, &showlist);
7703
7704 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7705 &exec_direction, _("Set direction of execution.\n\
7706 Options are 'forward' or 'reverse'."),
7707 _("Show direction of execution (forward/reverse)."),
7708 _("Tells gdb whether to execute forward or backward."),
7709 set_exec_direction_func, show_exec_direction_func,
7710 &setlist, &showlist);
7711
7712 /* Set/show detach-on-fork: user-settable mode. */
7713
7714 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7715 Set whether gdb will detach the child of a fork."), _("\
7716 Show whether gdb will detach the child of a fork."), _("\
7717 Tells gdb whether to detach the child of a fork."),
7718 NULL, NULL, &setlist, &showlist);
7719
7720 /* Set/show disable address space randomization mode. */
7721
7722 add_setshow_boolean_cmd ("disable-randomization", class_support,
7723 &disable_randomization, _("\
7724 Set disabling of debuggee's virtual address space randomization."), _("\
7725 Show disabling of debuggee's virtual address space randomization."), _("\
7726 When this mode is on (which is the default), randomization of the virtual\n\
7727 address space is disabled. Standalone programs run with the randomization\n\
7728 enabled by default on some platforms."),
7729 &set_disable_randomization,
7730 &show_disable_randomization,
7731 &setlist, &showlist);
7732
7733 /* ptid initializations */
7734 inferior_ptid = null_ptid;
7735 target_last_wait_ptid = minus_one_ptid;
7736
7737 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7738 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7739 observer_attach_thread_exit (infrun_thread_thread_exit);
7740 observer_attach_inferior_exit (infrun_inferior_exit);
7741
7742   /* Explicitly create without lookup, since lookup would try to create
7743      a void-typed value, and when we get here, gdbarch
7744 isn't initialized yet. At this point, we're quite sure there
7745 isn't another convenience variable of the same name. */
7746 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7747
7748 add_setshow_boolean_cmd ("observer", no_class,
7749 &observer_mode_1, _("\
7750 Set whether gdb controls the inferior in observer mode."), _("\
7751 Show whether gdb controls the inferior in observer mode."), _("\
7752 In observer mode, GDB can get data from the inferior, but not\n\
7753 affect its execution. Registers and memory may not be changed,\n\
7754 breakpoints may not be set, and the program cannot be interrupted\n\
7755 or signalled."),
7756 set_observer_mode,
7757 show_observer_mode,
7758 &setlist,
7759 &showlist);
7760 }