1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54
55 /* Prototypes for local functions */
56
57 static void signals_info (char *, int);
58
59 static void handle_command (char *, int);
60
61 static void sig_print_info (enum target_signal);
62
63 static void sig_print_header (void);
64
65 static void resume_cleanups (void *);
66
67 static int hook_stop_stub (void *);
68
69 static int restore_selected_frame (void *);
70
71 static void build_infrun (void);
72
73 static int follow_fork (void);
74
75 static void set_schedlock_func (char *args, int from_tty,
76 struct cmd_list_element *c);
77
78 static int currently_stepping (struct thread_info *tp);
79
80 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
81 void *data);
82
83 static void xdb_handle_command (char *args, int from_tty);
84
85 static int prepare_to_proceed (int);
86
87 void _initialize_infrun (void);
88
89 void nullify_last_target_wait_ptid (void);
90
91 /* When set, stop the 'step' command if we enter a function which has
92 no line number information. The normal behavior is that we step
93 over such functions. */
94 int step_stop_if_no_debug = 0;
95 static void
96 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
97 struct cmd_list_element *c, const char *value)
98 {
99 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
100 }
101
102 /* In asynchronous mode, but simulating synchronous execution. */
103
104 int sync_execution = 0;
105
106 /* wait_for_inferior and normal_stop use this to notify the user
107 when the inferior stopped in a different thread than it had been
108 running in. */
109
110 static ptid_t previous_inferior_ptid;
111
112 int debug_displaced = 0;
113 static void
114 show_debug_displaced (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
118 }
119
120 static int debug_infrun = 0;
121 static void
122 show_debug_infrun (struct ui_file *file, int from_tty,
123 struct cmd_list_element *c, const char *value)
124 {
125 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
126 }
127
128 /* If the program uses ELF-style shared libraries, then calls to
129 functions in shared libraries go through stubs, which live in a
130 table called the PLT (Procedure Linkage Table). The first time the
131 function is called, the stub sends control to the dynamic linker,
132 which looks up the function's real address, patches the stub so
133 that future calls will go directly to the function, and then passes
134 control to the function.
135
136 If we are stepping at the source level, we don't want to see any of
137 this --- we just want to skip over the stub and the dynamic linker.
138 The simple approach is to single-step until control leaves the
139 dynamic linker.
140
141 However, on some systems (e.g., Red Hat's 5.2 distribution) the
142 dynamic linker calls functions in the shared C library, so you
143 can't tell from the PC alone whether the dynamic linker is still
144 running. In this case, we use a step-resume breakpoint to get us
145 past the dynamic linker, as if we were using "next" to step over a
146 function call.
147
148 in_solib_dynsym_resolve_code() says whether we're in the dynamic
149 linker code or not. Normally, this means we single-step. However,
150 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
151 address where we can place a step-resume breakpoint to get past the
152 linker's symbol resolution function.
153
154 in_solib_dynsym_resolve_code() can generally be implemented in a
155 pretty portable way, by comparing the PC against the address ranges
156 of the dynamic linker's sections.
157
158 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
159 it depends on internal details of the dynamic linker. It's usually
160 not too hard to figure out where to put a breakpoint, but it
161 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
162 sanity checking. If it can't figure things out, returning zero and
163 getting the (possibly confusing) stepping behavior is better than
164 signalling an error, which will obscure the change in the
165 inferior's state. */
166
167 /* This function returns TRUE if pc is the address of an instruction
168 that lies within the dynamic linker (such as the event hook, or the
169 dld itself).
170
171 This function must be used only when a dynamic linker event has
172 been caught, and the inferior is being stepped out of the hook, or
173 undefined results are guaranteed. */
174
175 #ifndef SOLIB_IN_DYNAMIC_LINKER
176 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
177 #endif
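
/* An illustrative sketch (not used by infrun itself; the helper name
   is made up for the example) of how stepping logic can consult the
   two hooks described above.  The real decisions are made later in
   this file, in handle_inferior_event.  */

static void
example_step_through_solib_stub (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  if (in_solib_dynsym_resolve_code (pc))
    {
      CORE_ADDR resolver_end = gdbarch_skip_solib_resolver (gdbarch, pc);

      if (resolver_end != 0)
        {
          /* Plant a step-resume breakpoint at RESOLVER_END and resume,
             as if doing "next" over the resolver call.  */
        }
      else
        {
          /* Fall back to single-stepping until control leaves the
             dynamic linker.  */
        }
    }
}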
178
179
180 /* Convert the #defines into values. This is temporary until wfi control
181 flow is completely sorted out. */
182
183 #ifndef CANNOT_STEP_HW_WATCHPOINTS
184 #define CANNOT_STEP_HW_WATCHPOINTS 0
185 #else
186 #undef CANNOT_STEP_HW_WATCHPOINTS
187 #define CANNOT_STEP_HW_WATCHPOINTS 1
188 #endif
189
190 /* Tables of how to react to signals; the user sets them. */
191
192 static unsigned char *signal_stop;
193 static unsigned char *signal_print;
194 static unsigned char *signal_program;
195
196 #define SET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 1; \
202 } while (0)
203
204 #define UNSET_SIGS(nsigs,sigs,flags) \
205 do { \
206 int signum = (nsigs); \
207 while (signum-- > 0) \
208 if ((sigs)[signum]) \
209 (flags)[signum] = 0; \
210 } while (0)
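
/* An illustrative usage sketch, assuming NSIGS/SIGS describe the
   signals a user listed in a "handle" command: mark each listed
   signal as one that stops the inferior and is announced.

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);  */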
211
212 /* Value to pass to target_resume() to cause all threads to resume. */
213
214 #define RESUME_ALL minus_one_ptid
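
/* For example (an illustrative sketch; resume () below computes the
   actual ptid to pass):

     target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);  */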
215
216 /* Command list pointer for the "stop" placeholder. */
217
218 static struct cmd_list_element *stop_command;
219
220 /* Function the inferior was in as of the last step command. */
221
222 static struct symbol *step_start_function;
223
224 /* Nonzero if we want to give control to the user when we're notified
225 of shared library events by the dynamic linker. */
226 static int stop_on_solib_events;
227 static void
228 show_stop_on_solib_events (struct ui_file *file, int from_tty,
229 struct cmd_list_element *c, const char *value)
230 {
231 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
232 value);
233 }
234
235 /* Nonzero means we are expecting a trace trap
236 and should stop the inferior and return silently when it happens. */
237
238 int stop_after_trap;
239
240 /* Save register contents here when executing a "finish" command or when
241 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
242 Thus this contains the return value from the called function (assuming
243 values are returned in a register). */
244
245 struct regcache *stop_registers;
246
247 /* Nonzero after stop if current stack frame should be printed. */
248
249 static int stop_print_frame;
250
251 /* This is a cached copy of the pid/waitstatus of the last event
252 returned by target_wait()/deprecated_target_wait_hook(). This
253 information is returned by get_last_target_status(). */
254 static ptid_t target_last_wait_ptid;
255 static struct target_waitstatus target_last_waitstatus;
256
257 static void context_switch (ptid_t ptid);
258
259 void init_thread_stepping_state (struct thread_info *tss);
260
261 void init_infwait_state (void);
262
263 static const char follow_fork_mode_child[] = "child";
264 static const char follow_fork_mode_parent[] = "parent";
265
266 static const char *follow_fork_mode_kind_names[] = {
267 follow_fork_mode_child,
268 follow_fork_mode_parent,
269 NULL
270 };
271
272 static const char *follow_fork_mode_string = follow_fork_mode_parent;
273 static void
274 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
275 struct cmd_list_element *c, const char *value)
276 {
277 fprintf_filtered (file, _("\
278 Debugger response to a program call of fork or vfork is \"%s\".\n"),
279 value);
280 }
281 \f
282
283 /* Tell the target to follow the fork we're stopped at. Returns true
284 if the inferior should be resumed; false, if the target for some
285 reason decided it's best not to resume. */
286
287 static int
288 follow_fork (void)
289 {
290 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
291 int should_resume = 1;
292 struct thread_info *tp;
293
294 /* Copy user stepping state to the new inferior thread. FIXME: the
295 followed fork child thread should have a copy of most of the
296 parent thread structure's run control related fields, not just these.
297 Initialized to avoid "may be used uninitialized" warnings from gcc. */
298 struct breakpoint *step_resume_breakpoint = NULL;
299 CORE_ADDR step_range_start = 0;
300 CORE_ADDR step_range_end = 0;
301 struct frame_id step_frame_id = { 0 };
302
303 if (!non_stop)
304 {
305 ptid_t wait_ptid;
306 struct target_waitstatus wait_status;
307
308 /* Get the last target status returned by target_wait(). */
309 get_last_target_status (&wait_ptid, &wait_status);
310
311 /* If not stopped at a fork event, then there's nothing else to
312 do. */
313 if (wait_status.kind != TARGET_WAITKIND_FORKED
314 && wait_status.kind != TARGET_WAITKIND_VFORKED)
315 return 1;
316
317 /* Check if we switched over from WAIT_PTID, since the event was
318 reported. */
319 if (!ptid_equal (wait_ptid, minus_one_ptid)
320 && !ptid_equal (inferior_ptid, wait_ptid))
321 {
322 /* We did. Switch back to WAIT_PTID thread, to tell the
323 target to follow it (in either direction). We'll
324 afterwards refuse to resume, and inform the user what
325 happened. */
326 switch_to_thread (wait_ptid);
327 should_resume = 0;
328 }
329 }
330
331 tp = inferior_thread ();
332
333 /* If there were any forks/vforks that were caught and are now to be
334 followed, then do so now. */
335 switch (tp->pending_follow.kind)
336 {
337 case TARGET_WAITKIND_FORKED:
338 case TARGET_WAITKIND_VFORKED:
339 {
340 ptid_t parent, child;
341
342 /* If the user did a next/step, etc, over a fork call,
343 preserve the stepping state in the fork child. */
344 if (follow_child && should_resume)
345 {
346 step_resume_breakpoint
347 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
348 step_range_start = tp->step_range_start;
349 step_range_end = tp->step_range_end;
350 step_frame_id = tp->step_frame_id;
351
352 /* For now, delete the parent's sr breakpoint; otherwise,
353 parent/child sr breakpoints are considered duplicates,
354 and the child version will not be installed. Remove
355 this when the breakpoints module becomes aware of
356 inferiors and address spaces. */
357 delete_step_resume_breakpoint (tp);
358 tp->step_range_start = 0;
359 tp->step_range_end = 0;
360 tp->step_frame_id = null_frame_id;
361 }
362
363 parent = inferior_ptid;
364 child = tp->pending_follow.value.related_pid;
365
366 /* Tell the target to do whatever is necessary to follow
367 either parent or child. */
368 if (target_follow_fork (follow_child))
369 {
370 /* Target refused to follow, or there's some other reason
371 we shouldn't resume. */
372 should_resume = 0;
373 }
374 else
375 {
376 /* This pending follow fork event is now handled, one way
377 or another. The previously selected thread may be gone
378 from the lists by now, but if it is still around, we need
379 to clear the pending follow request. */
380 tp = find_thread_ptid (parent);
381 if (tp)
382 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
383
384 /* This makes sure we don't try to apply the "Switched
385 over from WAIT_PTID" logic above. */
386 nullify_last_target_wait_ptid ();
387
388 /* If we followed the child, switch to it... */
389 if (follow_child)
390 {
391 switch_to_thread (child);
392
393 /* ... and preserve the stepping state, in case the
394 user was stepping over the fork call. */
395 if (should_resume)
396 {
397 tp = inferior_thread ();
398 tp->step_resume_breakpoint = step_resume_breakpoint;
399 tp->step_range_start = step_range_start;
400 tp->step_range_end = step_range_end;
401 tp->step_frame_id = step_frame_id;
402 }
403 else
404 {
405 /* If we get here, it was because we're trying to
406 resume from a fork catchpoint, but the user
407 has switched threads away from the thread that
408 forked. In that case, the resume command
409 issued is most likely not applicable to the
410 child, so just warn, and refuse to resume. */
411 warning (_("\
412 Not resuming: switched threads before following fork child.\n"));
413 }
414
415 /* Reset breakpoints in the child as appropriate. */
416 follow_inferior_reset_breakpoints ();
417 }
418 else
419 switch_to_thread (parent);
420 }
421 }
422 break;
423 case TARGET_WAITKIND_SPURIOUS:
424 /* Nothing to follow. */
425 break;
426 default:
427 internal_error (__FILE__, __LINE__,
428 "Unexpected pending_follow.kind %d\n",
429 tp->pending_follow.kind);
430 break;
431 }
432
433 return should_resume;
434 }
435
436 void
437 follow_inferior_reset_breakpoints (void)
438 {
439 struct thread_info *tp = inferior_thread ();
440
441 /* Was there a step_resume breakpoint? (There was if the user
442 did a "next" at the fork() call.) If so, explicitly reset its
443 thread number.
444
445 step_resumes are a form of bp that are made to be per-thread.
446 Since we created the step_resume bp when the parent process
447 was being debugged, and now are switching to the child process,
448 from the breakpoint package's viewpoint, that's a switch of
449 "threads". We must update the bp's notion of which thread
450 it is for, or it'll be ignored when it triggers. */
451
452 if (tp->step_resume_breakpoint)
453 breakpoint_re_set_thread (tp->step_resume_breakpoint);
454
455 /* Reinsert all breakpoints in the child. The user may have set
456 breakpoints after catching the fork, in which case those
457 were never set in the child, but only in the parent. This makes
458 sure the inserted breakpoints match the breakpoint list. */
459
460 breakpoint_re_set ();
461 insert_breakpoints ();
462 }
463
464 /* EXECD_PATHNAME is assumed to be non-NULL. */
465
466 static void
467 follow_exec (ptid_t pid, char *execd_pathname)
468 {
469 struct target_ops *tgt;
470 struct thread_info *th = inferior_thread ();
471
472 /* This is an exec event that we actually wish to pay attention to.
473 Refresh our symbol table to the newly exec'd program, remove any
474 momentary bp's, etc.
475
476 If there are breakpoints, they aren't really inserted now,
477 since the exec() transformed our inferior into a fresh set
478 of instructions.
479
480 We want to preserve symbolic breakpoints on the list, since
481 we have hopes that they can be reset after the new a.out's
482 symbol table is read.
483
484 However, any "raw" breakpoints must be removed from the list
485 (e.g., the solib bp's), since their address is probably invalid
486 now.
487
488 And, we DON'T want to call delete_breakpoints() here, since
489 that may write the bp's "shadow contents" (the instruction
490 value that was overwritten with a TRAP instruction). Since
491 we now have a new a.out, those shadow contents aren't valid. */
492 update_breakpoints_after_exec ();
493
494 /* If there was one, it's gone now. We cannot truly step to the
495 next statement through an exec(). */
496 th->step_resume_breakpoint = NULL;
497 th->step_range_start = 0;
498 th->step_range_end = 0;
499
500 /* The target reports the exec event to the main thread, even if
501 some other thread does the exec, and even if the main thread was
502 already stopped --- if debugging in non-stop mode, it's possible
503 the user had the main thread held stopped in the previous image
504 --- release it now. This is the same behavior as step-over-exec
505 with scheduler-locking on in all-stop mode. */
506 th->stop_requested = 0;
507
508 /* What is this a.out's name? */
509 printf_unfiltered (_("Executing new program: %s\n"), execd_pathname);
510
511 /* We've followed the inferior through an exec. Therefore, the
512 inferior has essentially been killed & reborn. */
513
514 gdb_flush (gdb_stdout);
515
516 breakpoint_init_inferior (inf_execd);
517
518 if (gdb_sysroot && *gdb_sysroot)
519 {
520 char *name = alloca (strlen (gdb_sysroot)
521 + strlen (execd_pathname)
522 + 1);
523 strcpy (name, gdb_sysroot);
524 strcat (name, execd_pathname);
525 execd_pathname = name;
526 }
527
528 /* That a.out is now the one to use. */
529 exec_file_attach (execd_pathname, 0);
530
531 /* Reset the shared library package. This ensures that we get a
532 shlib event when the child reaches "_start", at which point the
533 dld will have had a chance to initialize the child. */
534 /* Also, loading a symbol file below may trigger symbol lookups, and
535 we don't want those to be satisfied by the libraries of the
536 previous incarnation of this process. */
537 no_shared_libraries (NULL, 0);
538
539 /* Load the main file's symbols. */
540 symbol_file_add_main (execd_pathname, 0);
541
542 #ifdef SOLIB_CREATE_INFERIOR_HOOK
543 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
544 #else
545 solib_create_inferior_hook ();
546 #endif
547
548 jit_inferior_created_hook ();
549
550 /* Reinsert all breakpoints. (Those which were symbolic have
551 been reset to the proper address in the new a.out, thanks
552 to symbol_file_command...) */
553 insert_breakpoints ();
554
555 /* The next resume of this inferior should bring it to the shlib
556 startup breakpoints. (If the user had also set bp's on
557 "main" from the old (parent) process, then they'll auto-
558 matically get reset there in the new process.) */
559 }
560
561 /* Non-zero if we are just simulating a single-step. This is needed
562 because we cannot remove the breakpoints in the inferior process
563 until after the `wait' in `wait_for_inferior'. */
564 static int singlestep_breakpoints_inserted_p = 0;
565
566 /* The thread we inserted single-step breakpoints for. */
567 static ptid_t singlestep_ptid;
568
569 /* PC when we started this single-step. */
570 static CORE_ADDR singlestep_pc;
571
572 /* If another thread hit the singlestep breakpoint, we save the original
573 thread here so that we can resume single-stepping it later. */
574 static ptid_t saved_singlestep_ptid;
575 static int stepping_past_singlestep_breakpoint;
576
577 /* If not equal to null_ptid, this means that after the step over a
578 breakpoint finishes, we need to switch to deferred_step_ptid, and step it.
579
580 The use case is when one thread has hit a breakpoint, and then the user
581 has switched to another thread and issued 'step'. We need to step over
582 the breakpoint in the thread that hit it, but then continue
583 stepping the thread the user has selected. */
584 static ptid_t deferred_step_ptid;
585 \f
586 /* Displaced stepping. */
587
588 /* In non-stop debugging mode, we must take special care to manage
589 breakpoints properly; in particular, the traditional strategy for
590 stepping a thread past a breakpoint it has hit is unsuitable.
591 'Displaced stepping' is a tactic for stepping one thread past a
592 breakpoint it has hit while ensuring that other threads running
593 concurrently will hit the breakpoint as they should.
594
595 The traditional way to step a thread T off a breakpoint in a
596 multi-threaded program in all-stop mode is as follows:
597
598 a0) Initially, all threads are stopped, and breakpoints are not
599 inserted.
600 a1) We single-step T, leaving breakpoints uninserted.
601 a2) We insert breakpoints, and resume all threads.
602
603 In non-stop debugging, however, this strategy is unsuitable: we
604 don't want to have to stop all threads in the system in order to
605 continue or step T past a breakpoint. Instead, we use displaced
606 stepping:
607
608 n0) Initially, T is stopped, other threads are running, and
609 breakpoints are inserted.
610 n1) We copy the instruction "under" the breakpoint to a separate
611 location, outside the main code stream, making any adjustments
612 to the instruction, register, and memory state as directed by
613 T's architecture.
614 n2) We single-step T over the instruction at its new location.
615 n3) We adjust the resulting register and memory state as directed
616 by T's architecture. This includes resetting T's PC to point
617 back into the main instruction stream.
618 n4) We resume T.
619
620 This approach depends on the following gdbarch methods:
621
622 - gdbarch_max_insn_length and gdbarch_displaced_step_location
623 indicate where to copy the instruction, and how much space must
624 be reserved there. We use these in step n1.
625
626 - gdbarch_displaced_step_copy_insn copies an instruction to a new
627 address, and makes any necessary adjustments to the instruction,
628 register contents, and memory. We use this in step n1.
629
630 - gdbarch_displaced_step_fixup adjusts registers and memory after
631 we have successfully single-stepped the instruction, to yield the
632 same effect the instruction would have had if we had executed it
633 at its original address. We use this in step n3.
634
635 - gdbarch_displaced_step_free_closure provides cleanup.
636
637 The gdbarch_displaced_step_copy_insn and
638 gdbarch_displaced_step_fixup functions must be written so that
639 copying an instruction with gdbarch_displaced_step_copy_insn,
640 single-stepping across the copied instruction, and then applying
641 gdbarch_displaced_step_fixup should have the same effects on the
642 thread's memory and registers as stepping the instruction in place
643 would have. Exactly which responsibilities fall to the copy and
644 which fall to the fixup is up to the author of those functions.
645
646 See the comments in gdbarch.sh for details.
647
648 Note that displaced stepping and software single-step cannot
649 currently be used in combination, although with some care I think
650 they could be made to. Software single-step works by placing
651 breakpoints on all possible subsequent instructions; if the
652 displaced instruction is a PC-relative jump, those breakpoints
653 could fall in very strange places --- on pages that aren't
654 executable, or at addresses that are not proper instruction
655 boundaries. (We do generally let other threads run while we wait
656 to hit the software single-step breakpoint, and they might
657 encounter such a corrupted instruction.) One way to work around
658 this would be to have gdbarch_displaced_step_copy_insn fully
659 simulate the effect of PC-relative instructions (and return NULL)
660 on architectures that use software single-stepping.
661
662 In non-stop mode, we can have independent and simultaneous step
663 requests, so more than one thread may need to simultaneously step
664 over a breakpoint. The current implementation assumes there is
665 only one scratch space per process. In this case, we have to
666 serialize access to the scratch space. If thread A wants to step
667 over a breakpoint, but we are currently waiting for some other
668 thread to complete a displaced step, we leave thread A stopped and
669 place it in the displaced_step_request_queue. Whenever a displaced
670 step finishes, we pick the next thread in the queue and start a new
671 displaced step operation on it. See displaced_step_prepare and
672 displaced_step_fixup for details. */
673
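/* An illustrative, non-authoritative sketch of steps n1-n4 above,
   expressed with the gdbarch methods this file uses further below.
   Error handling, the request queue, and cleanups are omitted; see
   displaced_step_prepare and displaced_step_fixup for the real code.  */

static void
example_displaced_step_outline (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the displaced instruction to the scratch location.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);
  regcache_write_pc (regcache, copy);

  /* n2: the caller would now single-step the thread at COPY (e.g. via
     target_resume) and wait for it to stop.  */

  /* n3: fix up registers and memory, including resetting the PC back
     into the main instruction stream.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);

  /* n4: resume the thread normally.  */
}
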
674 /* If this is not null_ptid, this is the thread carrying out a
675 displaced single-step. This thread's state will require fixing up
676 once it has completed its step. */
677 static ptid_t displaced_step_ptid;
678
679 struct displaced_step_request
680 {
681 ptid_t ptid;
682 struct displaced_step_request *next;
683 };
684
685 /* A queue of pending displaced stepping requests. */
686 struct displaced_step_request *displaced_step_request_queue;
687
688 /* The architecture the thread had when we stepped it. */
689 static struct gdbarch *displaced_step_gdbarch;
690
691 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
692 for post-step cleanup. */
693 static struct displaced_step_closure *displaced_step_closure;
694
695 /* The address of the original instruction, and the copy we made. */
696 static CORE_ADDR displaced_step_original, displaced_step_copy;
697
698 /* Saved contents of copy area. */
699 static gdb_byte *displaced_step_saved_copy;
700
701 /* Enum strings for "set|show displaced-stepping". */
702
703 static const char can_use_displaced_stepping_auto[] = "auto";
704 static const char can_use_displaced_stepping_on[] = "on";
705 static const char can_use_displaced_stepping_off[] = "off";
706 static const char *can_use_displaced_stepping_enum[] =
707 {
708 can_use_displaced_stepping_auto,
709 can_use_displaced_stepping_on,
710 can_use_displaced_stepping_off,
711 NULL,
712 };
713
714 /* If ON, and the architecture supports it, GDB will use displaced
715 stepping to step over breakpoints. If OFF, or if the architecture
716 doesn't support it, GDB will instead use the traditional
717 hold-and-step approach. If AUTO (which is the default), GDB will
718 decide which technique to use to step over breakpoints depending on
719 which of all-stop or non-stop mode is active --- displaced stepping
720 in non-stop mode; hold-and-step in all-stop mode. */
721
722 static const char *can_use_displaced_stepping =
723 can_use_displaced_stepping_auto;
724
725 static void
726 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
727 struct cmd_list_element *c,
728 const char *value)
729 {
730 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
731 fprintf_filtered (file, _("\
732 Debugger's willingness to use displaced stepping to step over \
733 breakpoints is %s (currently %s).\n"),
734 value, non_stop ? "on" : "off");
735 else
736 fprintf_filtered (file, _("\
737 Debugger's willingness to use displaced stepping to step over \
738 breakpoints is %s.\n"), value);
739 }
740
741 /* Return non-zero if displaced stepping can/should be used to step
742 over breakpoints. */
743
744 static int
745 use_displaced_stepping (struct gdbarch *gdbarch)
746 {
747 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
748 && non_stop)
749 || can_use_displaced_stepping == can_use_displaced_stepping_on)
750 && gdbarch_displaced_step_copy_insn_p (gdbarch)
751 && !RECORD_IS_USED);
752 }
753
754 /* Clean out any stray displaced stepping state. */
755 static void
756 displaced_step_clear (void)
757 {
758 /* Indicate that there is no cleanup pending. */
759 displaced_step_ptid = null_ptid;
760
761 if (displaced_step_closure)
762 {
763 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
764 displaced_step_closure);
765 displaced_step_closure = NULL;
766 }
767 }
768
769 static void
770 displaced_step_clear_cleanup (void *ignore)
771 {
772 displaced_step_clear ();
773 }
774
775 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
776 void
777 displaced_step_dump_bytes (struct ui_file *file,
778 const gdb_byte *buf,
779 size_t len)
780 {
781 int i;
782
783 for (i = 0; i < len; i++)
784 fprintf_unfiltered (file, "%02x ", buf[i]);
785 fputs_unfiltered ("\n", file);
786 }
787
788 /* Prepare to single-step, using displaced stepping.
789
790 Note that we cannot use displaced stepping when we have a signal to
791 deliver. If we have a signal to deliver and an instruction to step
792 over, then after the step, there will be no indication from the
793 target whether the thread entered a signal handler or ignored the
794 signal and stepped over the instruction successfully --- both cases
795 result in a simple SIGTRAP. In the first case we mustn't do a
796 fixup, and in the second case we must --- but we can't tell which.
797 Comments in the code for 'random signals' in handle_inferior_event
798 explain how we handle this case instead.
799
800 Returns 1 if preparing was successful -- this thread is going to be
801 stepped now; or 0 if displaced stepping this thread got queued. */
802 static int
803 displaced_step_prepare (ptid_t ptid)
804 {
805 struct cleanup *old_cleanups, *ignore_cleanups;
806 struct regcache *regcache = get_thread_regcache (ptid);
807 struct gdbarch *gdbarch = get_regcache_arch (regcache);
808 CORE_ADDR original, copy;
809 ULONGEST len;
810 struct displaced_step_closure *closure;
811
812 /* We should never reach this function if the architecture does not
813 support displaced stepping. */
814 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
815
816 /* For the first cut, we're displaced stepping one thread at a
817 time. */
818
819 if (!ptid_equal (displaced_step_ptid, null_ptid))
820 {
821 /* Already waiting for a displaced step to finish. Defer this
822 request and place it in the queue. */
823 struct displaced_step_request *req, *new_req;
824
825 if (debug_displaced)
826 fprintf_unfiltered (gdb_stdlog,
827 "displaced: deferring step of %s\n",
828 target_pid_to_str (ptid));
829
830 new_req = xmalloc (sizeof (*new_req));
831 new_req->ptid = ptid;
832 new_req->next = NULL;
833
834 if (displaced_step_request_queue)
835 {
836 for (req = displaced_step_request_queue;
837 req && req->next;
838 req = req->next)
839 ;
840 req->next = new_req;
841 }
842 else
843 displaced_step_request_queue = new_req;
844
845 return 0;
846 }
847 else
848 {
849 if (debug_displaced)
850 fprintf_unfiltered (gdb_stdlog,
851 "displaced: stepping %s now\n",
852 target_pid_to_str (ptid));
853 }
854
855 displaced_step_clear ();
856
857 old_cleanups = save_inferior_ptid ();
858 inferior_ptid = ptid;
859
860 original = regcache_read_pc (regcache);
861
862 copy = gdbarch_displaced_step_location (gdbarch);
863 len = gdbarch_max_insn_length (gdbarch);
864
865 /* Save the original contents of the copy area. */
866 displaced_step_saved_copy = xmalloc (len);
867 ignore_cleanups = make_cleanup (free_current_contents,
868 &displaced_step_saved_copy);
869 read_memory (copy, displaced_step_saved_copy, len);
870 if (debug_displaced)
871 {
872 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
873 paddress (gdbarch, copy));
874 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
875 }
876
877 closure = gdbarch_displaced_step_copy_insn (gdbarch,
878 original, copy, regcache);
879
880 /* We don't support the fully-simulated case at present. */
881 gdb_assert (closure);
882
883 /* Save the information we need to fix things up if the step
884 succeeds. */
885 displaced_step_ptid = ptid;
886 displaced_step_gdbarch = gdbarch;
887 displaced_step_closure = closure;
888 displaced_step_original = original;
889 displaced_step_copy = copy;
890
891 make_cleanup (displaced_step_clear_cleanup, 0);
892
893 /* Resume execution at the copy. */
894 regcache_write_pc (regcache, copy);
895
896 discard_cleanups (ignore_cleanups);
897
898 do_cleanups (old_cleanups);
899
900 if (debug_displaced)
901 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
902 paddress (gdbarch, copy));
903
904 return 1;
905 }
906
907 static void
908 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
909 {
910 struct cleanup *ptid_cleanup = save_inferior_ptid ();
911 inferior_ptid = ptid;
912 write_memory (memaddr, myaddr, len);
913 do_cleanups (ptid_cleanup);
914 }
915
916 static void
917 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
918 {
919 struct cleanup *old_cleanups;
920
921 /* Was this event for the pid we displaced? */
922 if (ptid_equal (displaced_step_ptid, null_ptid)
923 || ! ptid_equal (displaced_step_ptid, event_ptid))
924 return;
925
926 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
927
928 /* Restore the contents of the copy area. */
929 {
930 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
931 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
932 displaced_step_saved_copy, len);
933 if (debug_displaced)
934 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
935 paddress (displaced_step_gdbarch,
936 displaced_step_copy));
937 }
938
939 /* Did the instruction complete successfully? */
940 if (signal == TARGET_SIGNAL_TRAP)
941 {
942 /* Fix up the resulting state. */
943 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
944 displaced_step_closure,
945 displaced_step_original,
946 displaced_step_copy,
947 get_thread_regcache (displaced_step_ptid));
948 }
949 else
950 {
951 /* Since the instruction didn't complete, all we can do is
952 relocate the PC. */
953 struct regcache *regcache = get_thread_regcache (event_ptid);
954 CORE_ADDR pc = regcache_read_pc (regcache);
955 pc = displaced_step_original + (pc - displaced_step_copy);
956 regcache_write_pc (regcache, pc);
957 }
958
959 do_cleanups (old_cleanups);
960
961 displaced_step_ptid = null_ptid;
962
963 /* Are there any pending displaced stepping requests? If so, run
964 one now. */
965 while (displaced_step_request_queue)
966 {
967 struct displaced_step_request *head;
968 ptid_t ptid;
969 struct regcache *regcache;
970 struct gdbarch *gdbarch;
971 CORE_ADDR actual_pc;
972
973 head = displaced_step_request_queue;
974 ptid = head->ptid;
975 displaced_step_request_queue = head->next;
976 xfree (head);
977
978 context_switch (ptid);
979
980 regcache = get_thread_regcache (ptid);
981 actual_pc = regcache_read_pc (regcache);
982
983 if (breakpoint_here_p (actual_pc))
984 {
985 if (debug_displaced)
986 fprintf_unfiltered (gdb_stdlog,
987 "displaced: stepping queued %s now\n",
988 target_pid_to_str (ptid));
989
990 displaced_step_prepare (ptid);
991
992 gdbarch = get_regcache_arch (regcache);
993
994 if (debug_displaced)
995 {
996 CORE_ADDR actual_pc = regcache_read_pc (regcache);
997 gdb_byte buf[4];
998
999 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1000 paddress (gdbarch, actual_pc));
1001 read_memory (actual_pc, buf, sizeof (buf));
1002 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1003 }
1004
1005 if (gdbarch_displaced_step_hw_singlestep
1006 (gdbarch, displaced_step_closure))
1007 target_resume (ptid, 1, TARGET_SIGNAL_0);
1008 else
1009 target_resume (ptid, 0, TARGET_SIGNAL_0);
1010
1011 /* Done, we're stepping a thread. */
1012 break;
1013 }
1014 else
1015 {
1016 int step;
1017 struct thread_info *tp = inferior_thread ();
1018
1019 /* The breakpoint we were sitting under has since been
1020 removed. */
1021 tp->trap_expected = 0;
1022
1023 /* Go back to what we were trying to do. */
1024 step = currently_stepping (tp);
1025
1026 if (debug_displaced)
1027 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1028 target_pid_to_str (tp->ptid), step);
1029
1030 target_resume (ptid, step, TARGET_SIGNAL_0);
1031 tp->stop_signal = TARGET_SIGNAL_0;
1032
1033 /* This request was discarded. See if there's any other
1034 thread waiting for its turn. */
1035 }
1036 }
1037 }
1038
1039 /* Update global variables holding ptids to hold NEW_PTID if they were
1040 holding OLD_PTID. */
1041 static void
1042 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1043 {
1044 struct displaced_step_request *it;
1045
1046 if (ptid_equal (inferior_ptid, old_ptid))
1047 inferior_ptid = new_ptid;
1048
1049 if (ptid_equal (singlestep_ptid, old_ptid))
1050 singlestep_ptid = new_ptid;
1051
1052 if (ptid_equal (displaced_step_ptid, old_ptid))
1053 displaced_step_ptid = new_ptid;
1054
1055 if (ptid_equal (deferred_step_ptid, old_ptid))
1056 deferred_step_ptid = new_ptid;
1057
1058 for (it = displaced_step_request_queue; it; it = it->next)
1059 if (ptid_equal (it->ptid, old_ptid))
1060 it->ptid = new_ptid;
1061 }
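
/* A hedged sketch of how the hook above is wired up; the registration
   is assumed to happen in _initialize_infrun, via the observer
   machinery:

     observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);  */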
1062
1063 \f
1064 /* Resuming. */
1065
1066 /* Things to clean up if we QUIT out of resume (). */
1067 static void
1068 resume_cleanups (void *ignore)
1069 {
1070 normal_stop ();
1071 }
1072
1073 static const char schedlock_off[] = "off";
1074 static const char schedlock_on[] = "on";
1075 static const char schedlock_step[] = "step";
1076 static const char *scheduler_enums[] = {
1077 schedlock_off,
1078 schedlock_on,
1079 schedlock_step,
1080 NULL
1081 };
1082 static const char *scheduler_mode = schedlock_off;
1083 static void
1084 show_scheduler_mode (struct ui_file *file, int from_tty,
1085 struct cmd_list_element *c, const char *value)
1086 {
1087 fprintf_filtered (file, _("\
1088 Mode for locking scheduler during execution is \"%s\".\n"),
1089 value);
1090 }
1091
1092 static void
1093 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1094 {
1095 if (!target_can_lock_scheduler)
1096 {
1097 scheduler_mode = schedlock_off;
1098 error (_("Target '%s' cannot support this command."), target_shortname);
1099 }
1100 }
1101
1102 /* True if execution commands resume all threads of all processes by
1103 default; otherwise, resume only threads of the current inferior
1104 process. */
1105 int sched_multi = 0;
1106
1107 /* Try to set up software single stepping over the specified location.
1108 Return 1 if target_resume() should use hardware single step.
1109
1110 GDBARCH the current gdbarch.
1111 PC the location to step over. */
1112
1113 static int
1114 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1115 {
1116 int hw_step = 1;
1117
1118 if (gdbarch_software_single_step_p (gdbarch)
1119 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1120 {
1121 hw_step = 0;
1122 /* Do not pull these breakpoints until after a `wait' in
1123 `wait_for_inferior' */
1124 singlestep_breakpoints_inserted_p = 1;
1125 singlestep_ptid = inferior_ptid;
1126 singlestep_pc = pc;
1127 }
1128 return hw_step;
1129 }
1130
1131 /* Resume the inferior, but allow a QUIT. This is useful if the user
1132 wants to interrupt some lengthy single-stepping operation
1133 (for child processes, the SIGINT goes to the inferior, and so
1134 we get a SIGINT random_signal, but for remote debugging and perhaps
1135 other targets, that's not true).
1136
1137 STEP nonzero if we should step (zero to continue instead).
1138 SIG is the signal to give the inferior (zero for none). */
1139 void
1140 resume (int step, enum target_signal sig)
1141 {
1142 int should_resume = 1;
1143 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1144 struct regcache *regcache = get_current_regcache ();
1145 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1146 struct thread_info *tp = inferior_thread ();
1147 CORE_ADDR pc = regcache_read_pc (regcache);
1148
1149 QUIT;
1150
1151 if (debug_infrun)
1152 fprintf_unfiltered (gdb_stdlog,
1153 "infrun: resume (step=%d, signal=%d), "
1154 "trap_expected=%d\n",
1155 step, sig, tp->trap_expected);
1156
1157 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1158 over an instruction that causes a page fault without triggering
1159 a hardware watchpoint. The kernel properly notices that it shouldn't
1160 stop, because the hardware watchpoint is not triggered, but it forgets
1161 the step request and continues the program normally.
1162 Work around the problem by removing hardware watchpoints if a step is
1163 requested; GDB will check for a hardware watchpoint trigger after the
1164 step anyway. */
1165 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1166 remove_hw_watchpoints ();
1167
1168
1169 /* Normally, by the time we reach `resume', the breakpoints are either
1170 removed or inserted, as appropriate. The exception is if we're sitting
1171 at a permanent breakpoint; we need to step over it, but permanent
1172 breakpoints can't be removed. So we have to test for it here. */
1173 if (breakpoint_here_p (pc) == permanent_breakpoint_here)
1174 {
1175 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1176 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1177 else
1178 error (_("\
1179 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1180 how to step past a permanent breakpoint on this architecture. Try using\n\
1181 a command like `return' or `jump' to continue execution."));
1182 }
1183
1184 /* If enabled, step over breakpoints by executing a copy of the
1185 instruction at a different address.
1186
1187 We can't use displaced stepping when we have a signal to deliver;
1188 the comments for displaced_step_prepare explain why. The
1189 comments in handle_inferior_event for dealing with 'random
1190 signals' explain what we do instead. */
1191 if (use_displaced_stepping (gdbarch)
1192 && (tp->trap_expected
1193 || (step && gdbarch_software_single_step_p (gdbarch)))
1194 && sig == TARGET_SIGNAL_0)
1195 {
1196 if (!displaced_step_prepare (inferior_ptid))
1197 {
1198 /* Got placed in displaced stepping queue. Will be resumed
1199 later when all the currently queued displaced stepping
1200 requests finish. The thread is not executing at this point,
1201 and the call to set_executing will be made later. But we
1202 need to call set_running here, since from the frontend's point of view,
1203 the thread is running. */
1204 set_running (inferior_ptid, 1);
1205 discard_cleanups (old_cleanups);
1206 return;
1207 }
1208
1209 step = gdbarch_displaced_step_hw_singlestep
1210 (gdbarch, displaced_step_closure);
1211 }
1212
1213 /* Do we need to do it the hard way, w/temp breakpoints? */
1214 else if (step)
1215 step = maybe_software_singlestep (gdbarch, pc);
1216
1217 if (should_resume)
1218 {
1219 ptid_t resume_ptid;
1220
1221 /* If STEP is set, it's a request to use hardware stepping
1222 facilities. But in that case, we should never
1223 use a singlestep breakpoint. */
1224 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1225
1226 /* Decide the set of threads to ask the target to resume. Start
1227 by assuming everything will be resumed, then narrow the set
1228 by applying increasingly restrictive conditions. */
1229
1230 /* By default, resume all threads of all processes. */
1231 resume_ptid = RESUME_ALL;
1232
1233 /* Maybe resume only all threads of the current process. */
1234 if (!sched_multi && target_supports_multi_process ())
1235 {
1236 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1237 }
1238
1239 /* Maybe resume a single thread after all. */
1240 if (singlestep_breakpoints_inserted_p
1241 && stepping_past_singlestep_breakpoint)
1242 {
1243 /* The situation here is as follows. In thread T1 we wanted to
1244 single-step. Lacking hardware single-stepping, we've set a
1245 breakpoint at the PC of the next instruction -- call it
1246 P. After resuming, we've hit that breakpoint in thread T2.
1247 Now we've removed the original breakpoint, inserted a breakpoint
1248 at P+1, and are trying to step to advance T2 past the breakpoint.
1249 We need to step only T2; if T1 is allowed to run freely,
1250 it can run past P, and if other threads are allowed to run,
1251 they can hit the breakpoint at P+1, and nested hits of single-step
1252 breakpoints are not something we'd want -- that's complicated
1253 to support, and has no value. */
1254 resume_ptid = inferior_ptid;
1255 }
1256 else if ((step || singlestep_breakpoints_inserted_p)
1257 && tp->trap_expected)
1258 {
1259 /* We're allowing a thread to run past a breakpoint it has
1260 hit, by single-stepping the thread with the breakpoint
1261 removed. In that case, we need to single-step only this
1262 thread, and keep others stopped, as they can miss this
1263 breakpoint if allowed to run.
1264
1265 The current code actually removes all breakpoints when
1266 doing this, not just the one being stepped over, so if we
1267 let other threads run, we can actually miss any
1268 breakpoint, not just the one at PC. */
1269 resume_ptid = inferior_ptid;
1270 }
1271 else if (non_stop)
1272 {
1273 /* With non-stop mode on, threads are always handled
1274 individually. */
1275 resume_ptid = inferior_ptid;
1276 }
1277 else if ((scheduler_mode == schedlock_on)
1278 || (scheduler_mode == schedlock_step
1279 && (step || singlestep_breakpoints_inserted_p)))
1280 {
1281 /* User-settable 'scheduler' mode requires solo thread resume. */
1282 resume_ptid = inferior_ptid;
1283 }
1284
1285 if (gdbarch_cannot_step_breakpoint (gdbarch))
1286 {
1287 /* Most targets can step a breakpoint instruction, thus
1288 executing it normally. But if this one cannot, just
1289 continue and we will hit it anyway. */
1290 if (step && breakpoint_inserted_here_p (pc))
1291 step = 0;
1292 }
1293
1294 if (debug_displaced
1295 && use_displaced_stepping (gdbarch)
1296 && tp->trap_expected)
1297 {
1298 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1299 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1300 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1301 gdb_byte buf[4];
1302
1303 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1304 paddress (resume_gdbarch, actual_pc));
1305 read_memory (actual_pc, buf, sizeof (buf));
1306 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1307 }
1308
1309 /* Install inferior's terminal modes. */
1310 target_terminal_inferior ();
1311
1312 /* Avoid confusing the next resume, if the next stop/resume
1313 happens to apply to another thread. */
1314 tp->stop_signal = TARGET_SIGNAL_0;
1315
1316 target_resume (resume_ptid, step, sig);
1317 }
1318
1319 discard_cleanups (old_cleanups);
1320 }
1321 \f
1322 /* Proceeding. */
1323
1324 /* Clear out all variables saying what to do when the inferior is continued.
1325 First do this, then set the ones you want, then call `proceed'. */
1326
1327 static void
1328 clear_proceed_status_thread (struct thread_info *tp)
1329 {
1330 if (debug_infrun)
1331 fprintf_unfiltered (gdb_stdlog,
1332 "infrun: clear_proceed_status_thread (%s)\n",
1333 target_pid_to_str (tp->ptid));
1334
1335 tp->trap_expected = 0;
1336 tp->step_range_start = 0;
1337 tp->step_range_end = 0;
1338 tp->step_frame_id = null_frame_id;
1339 tp->step_stack_frame_id = null_frame_id;
1340 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1341 tp->stop_requested = 0;
1342
1343 tp->stop_step = 0;
1344
1345 tp->proceed_to_finish = 0;
1346
1347 /* Discard any remaining commands or status from previous stop. */
1348 bpstat_clear (&tp->stop_bpstat);
1349 }
1350
1351 static int
1352 clear_proceed_status_callback (struct thread_info *tp, void *data)
1353 {
1354 if (is_exited (tp->ptid))
1355 return 0;
1356
1357 clear_proceed_status_thread (tp);
1358 return 0;
1359 }
1360
1361 void
1362 clear_proceed_status (void)
1363 {
1364 if (!ptid_equal (inferior_ptid, null_ptid))
1365 {
1366 struct inferior *inferior;
1367
1368 if (non_stop)
1369 {
1370 /* If in non-stop mode, only delete the per-thread status
1371 of the current thread. */
1372 clear_proceed_status_thread (inferior_thread ());
1373 }
1374 else
1375 {
1376 /* In all-stop mode, delete the per-thread status of
1377 *all* threads. */
1378 iterate_over_threads (clear_proceed_status_callback, NULL);
1379 }
1380
1381 inferior = current_inferior ();
1382 inferior->stop_soon = NO_STOP_QUIETLY;
1383 }
1384
1385 stop_after_trap = 0;
1386
1387 observer_notify_about_to_proceed ();
1388
1389 if (stop_registers)
1390 {
1391 regcache_xfree (stop_registers);
1392 stop_registers = NULL;
1393 }
1394 }
1395
1396 /* Check the current thread against the thread that reported the most recent
1397 event. If a step-over is required, return TRUE and set the current thread
1398 to the old thread. Otherwise return FALSE.
1399
1400 This should be suitable for any targets that support threads. */
1401
1402 static int
1403 prepare_to_proceed (int step)
1404 {
1405 ptid_t wait_ptid;
1406 struct target_waitstatus wait_status;
1407 int schedlock_enabled;
1408
1409 /* With non-stop mode on, threads are always handled individually. */
1410 gdb_assert (! non_stop);
1411
1412 /* Get the last target status returned by target_wait(). */
1413 get_last_target_status (&wait_ptid, &wait_status);
1414
1415 /* Make sure we were stopped at a breakpoint. */
1416 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1417 || wait_status.value.sig != TARGET_SIGNAL_TRAP)
1418 {
1419 return 0;
1420 }
1421
1422 schedlock_enabled = (scheduler_mode == schedlock_on
1423 || (scheduler_mode == schedlock_step
1424 && step));
1425
1426 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1427 if (schedlock_enabled)
1428 return 0;
1429
1430 /* Don't switch over if we're about to resume some process other
1431 than WAIT_PTID's, and schedule-multiple is off. */
1432 if (!sched_multi
1433 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1434 return 0;
1435
1436 /* Switched over from WAIT_PTID. */
1437 if (!ptid_equal (wait_ptid, minus_one_ptid)
1438 && !ptid_equal (inferior_ptid, wait_ptid))
1439 {
1440 struct regcache *regcache = get_thread_regcache (wait_ptid);
1441
1442 if (breakpoint_here_p (regcache_read_pc (regcache)))
1443 {
1444 /* If stepping, remember current thread to switch back to. */
1445 if (step)
1446 deferred_step_ptid = inferior_ptid;
1447
1448 /* Switch back to WAIT_PID thread. */
1449 switch_to_thread (wait_ptid);
1450
1451 /* We return 1 to indicate that there is a breakpoint here,
1452 so we need to step over it before continuing to avoid
1453 hitting it straight away. */
1454 return 1;
1455 }
1456 }
1457
1458 return 0;
1459 }
1460
1461 /* Basic routine for continuing the program in various fashions.
1462
1463 ADDR is the address to resume at, or -1 for resume where stopped.
1464 SIGGNAL is the signal to give it, or 0 for none,
1465 or -1 to act according to how it stopped.
1466 STEP is nonzero if we should trap after one instruction.
1467 -1 means return after that and print nothing.
1468 You should probably set various step_... variables
1469 before calling here, if you are stepping.
1470
1471 You should call clear_proceed_status before calling proceed. */
1472
1473 void
1474 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1475 {
1476 struct regcache *regcache;
1477 struct gdbarch *gdbarch;
1478 struct thread_info *tp;
1479 CORE_ADDR pc;
1480 int oneproc = 0;
1481
1482 /* If we're stopped at a fork/vfork, follow the branch set by the
1483 "set follow-fork-mode" command; otherwise, we'll just proceed
1484 resuming the current thread. */
1485 if (!follow_fork ())
1486 {
1487 /* The target for some reason decided not to resume. */
1488 normal_stop ();
1489 return;
1490 }
1491
1492 regcache = get_current_regcache ();
1493 gdbarch = get_regcache_arch (regcache);
1494 pc = regcache_read_pc (regcache);
1495
1496 if (step > 0)
1497 step_start_function = find_pc_function (pc);
1498 if (step < 0)
1499 stop_after_trap = 1;
1500
1501 if (addr == (CORE_ADDR) -1)
1502 {
1503 if (pc == stop_pc && breakpoint_here_p (pc)
1504 && execution_direction != EXEC_REVERSE)
1505 /* There is a breakpoint at the address we will resume at,
1506 step one instruction before inserting breakpoints so that
1507 we do not stop right away (and report a second hit at this
1508 breakpoint).
1509
1510 Note, we don't do this in reverse, because we won't
1511 actually be executing the breakpoint insn anyway.
1512 We'll be (un-)executing the previous instruction. */
1513
1514 oneproc = 1;
1515 else if (gdbarch_single_step_through_delay_p (gdbarch)
1516 && gdbarch_single_step_through_delay (gdbarch,
1517 get_current_frame ()))
1518 /* We stepped onto an instruction that needs to be stepped
1519 again before re-inserting the breakpoint, do so. */
1520 oneproc = 1;
1521 }
1522 else
1523 {
1524 regcache_write_pc (regcache, addr);
1525 }
1526
1527 if (debug_infrun)
1528 fprintf_unfiltered (gdb_stdlog,
1529 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1530 paddress (gdbarch, addr), siggnal, step);
1531
1532 if (non_stop)
1533 /* In non-stop, each thread is handled individually. The context
1534 must already be set to the right thread here. */
1535 ;
1536 else
1537 {
1538 /* In a multi-threaded task we may select another thread and
1539 then continue or step.
1540
1541 But if the old thread was stopped at a breakpoint, it will
1542 immediately cause another breakpoint stop without any
1543 execution (i.e. it will report a breakpoint hit incorrectly).
1544 So we must step over it first.
1545
1546 prepare_to_proceed checks the current thread against the
1547 thread that reported the most recent event. If a step-over
1548 is required it returns TRUE and sets the current thread to
1549 the old thread. */
1550 if (prepare_to_proceed (step))
1551 oneproc = 1;
1552 }
1553
1554 /* prepare_to_proceed may change the current thread. */
1555 tp = inferior_thread ();
1556
1557 if (oneproc)
1558 {
1559 tp->trap_expected = 1;
1560 /* If displaced stepping is enabled, we can step over the
1561 breakpoint without hitting it, so leave all breakpoints
1562 inserted. Otherwise we need to disable all breakpoints, step
1563 one instruction, and then re-add them when that step is
1564 finished. */
1565 if (!use_displaced_stepping (gdbarch))
1566 remove_breakpoints ();
1567 }
1568
1569 /* We can insert breakpoints if we're not trying to step over one,
1570 or if we are stepping over one but we're using displaced stepping
1571 to do so. */
1572 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1573 insert_breakpoints ();
1574
1575 if (!non_stop)
1576 {
1577 /* Pass the last stop signal to the thread we're resuming,
1578 irrespective of whether the current thread is the thread that
1579 got the last event or not. This was historically GDB's
1580 behaviour before keeping a stop_signal per thread. */
1581
1582 struct thread_info *last_thread;
1583 ptid_t last_ptid;
1584 struct target_waitstatus last_status;
1585
1586 get_last_target_status (&last_ptid, &last_status);
1587 if (!ptid_equal (inferior_ptid, last_ptid)
1588 && !ptid_equal (last_ptid, null_ptid)
1589 && !ptid_equal (last_ptid, minus_one_ptid))
1590 {
1591 last_thread = find_thread_ptid (last_ptid);
1592 if (last_thread)
1593 {
1594 tp->stop_signal = last_thread->stop_signal;
1595 last_thread->stop_signal = TARGET_SIGNAL_0;
1596 }
1597 }
1598 }
1599
1600 if (siggnal != TARGET_SIGNAL_DEFAULT)
1601 tp->stop_signal = siggnal;
1602 /* If this signal should not be seen by the program,
1603 give it zero. Used for debugging signals. */
1604 else if (!signal_program[tp->stop_signal])
1605 tp->stop_signal = TARGET_SIGNAL_0;
1606
1607 annotate_starting ();
1608
1609 /* Make sure that output from GDB appears before output from the
1610 inferior. */
1611 gdb_flush (gdb_stdout);
1612
1613 /* Refresh prev_pc value just prior to resuming. This used to be
1614 done in stop_stepping; however, setting prev_pc there did not handle
1615 scenarios such as inferior function calls or returning from
1616 a function via the return command. In those cases, the prev_pc
1617 value was not set properly for subsequent commands. The prev_pc value
1618 is used to initialize the starting line number in the ecs. With an
1619 invalid value, the gdb next command ends up stopping at the position
1620 represented by the next line table entry past our start position.
1621 On platforms that generate one line table entry per line, this
1622 is not a problem. However, on the ia64, the compiler generates
1623 extraneous line table entries that do not increase the line number.
1624 When we issue the gdb next command on the ia64 after an inferior call
1625 or a return command, we often end up a few instructions forward, still
1626 within the original line we started.
1627
1628 An attempt was made to have init_execution_control_state () refresh
1629 the prev_pc value before calculating the line number. This approach
1630 did not work because on platforms that use ptrace, the pc register
1631 cannot be read unless the inferior is stopped. At that point, we
1632 are not guaranteed the inferior is stopped and so the regcache_read_pc ()
1633 call can fail. Setting the prev_pc value here ensures the value is
1634 updated correctly when the inferior is stopped. */
1635 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1636
1637 /* Fill in with reasonable starting values. */
1638 init_thread_stepping_state (tp);
1639
1640 /* Reset to normal state. */
1641 init_infwait_state ();
1642
1643 /* Resume inferior. */
1644 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1645
1646 /* Wait for it to stop (if not standalone)
1647 and in any case decode why it stopped, and act accordingly. */
1648 /* Do this only if we are not using the event loop, or if the target
1649 does not support asynchronous execution. */
1650 if (!target_can_async_p ())
1651 {
1652 wait_for_inferior (0);
1653 normal_stop ();
1654 }
1655 }
1656 \f
1657
1658 /* Start remote-debugging of a machine over a serial link. */
1659
1660 void
1661 start_remote (int from_tty)
1662 {
1663 struct inferior *inferior;
1664 init_wait_for_inferior ();
1665
1666 inferior = current_inferior ();
1667 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1668
1669 /* Always go on waiting for the target, regardless of the mode. */
1670 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1671 indicate to wait_for_inferior that a target should timeout if
1672 nothing is returned (instead of just blocking). Because of this,
1673 targets expecting an immediate response need to, internally, set
1674 things up so that the target_wait() is forced to eventually
1675 timeout. */
1676 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1677 differentiate to its caller what the state of the target is after
1678 the initial open has been performed. Here we're assuming that
1679 the target has stopped. It should be possible to eventually have
1680 target_open() return to the caller an indication that the target
1681 is currently running and GDB state should be set to the same as
1682 for an async run. */
1683 wait_for_inferior (0);
1684
1685 /* Now that the inferior has stopped, do any bookkeeping like
1686 loading shared libraries. We want to do this before normal_stop,
1687 so that the displayed frame is up to date. */
1688 post_create_inferior (&current_target, from_tty);
1689
1690 normal_stop ();
1691 }
1692
1693 /* Initialize static vars when a new inferior begins. */
1694
1695 void
1696 init_wait_for_inferior (void)
1697 {
1698 /* These are meaningless until the first time through wait_for_inferior. */
1699
1700 breakpoint_init_inferior (inf_starting);
1701
1702 clear_proceed_status ();
1703
1704 stepping_past_singlestep_breakpoint = 0;
1705 deferred_step_ptid = null_ptid;
1706
1707 target_last_wait_ptid = minus_one_ptid;
1708
1709 previous_inferior_ptid = null_ptid;
1710 init_infwait_state ();
1711
1712 displaced_step_clear ();
1713
1714 /* Discard any skipped inlined frames. */
1715 clear_inline_frame_state (minus_one_ptid);
1716 }
1717
1718 \f
1719 /* This enum encodes possible reasons for doing a target_wait, so that
1720 wfi can call target_wait in one place. (Ultimately the call will be
1721 moved out of the infinite loop entirely.) */
1722
1723 enum infwait_states
1724 {
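  /* Default: nothing special is pending for the next target_wait.  */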
1725 infwait_normal_state,
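  /* A breakpoint was hit by a thread it wasn't intended for; that
     thread is being single-stepped past the breakpoint ("thread hop")
     and we're waiting for its next event.  */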
1726 infwait_thread_hop_state,
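  /* We single-stepped over a watchpoint on a target with steppable
     watchpoints; treat the next event as having stepped past the
     watched access.  */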
1727 infwait_step_watch_state,
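  /* Like the above, but breakpoints were removed first because the
     target's watchpoints are not steppable; they are re-inserted when
     the event arrives.  */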
1728 infwait_nonstep_watch_state
1729 };
1730
1731 /* Why did the inferior stop? Used to print the appropriate messages
1732 to the interface from within handle_inferior_event(). */
1733 enum inferior_stop_reason
1734 {
1735 /* Step, next, nexti, stepi finished. */
1736 END_STEPPING_RANGE,
1737 /* Inferior terminated by signal. */
1738 SIGNAL_EXITED,
1739 /* Inferior exited. */
1740 EXITED,
1741 /* Inferior received signal, and user asked to be notified. */
1742 SIGNAL_RECEIVED,
1743 /* Reverse execution -- target ran out of history info. */
1744 NO_HISTORY
1745 };
1746
1747 /* The PTID we'll do a target_wait on.  */
1748 ptid_t waiton_ptid;
1749
1750 /* Current inferior wait state. */
1751 enum infwait_states infwait_state;
1752
1753 /* Data to be passed around while handling an event. This data is
1754 discarded between events. */
1755 struct execution_control_state
1756 {
1757 ptid_t ptid;
1758 /* The thread that got the event, if this was a thread event; NULL
1759 otherwise. */
1760 struct thread_info *event_thread;
1761
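  /* The waitstatus returned by the last target_wait for this event.  */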
1762 struct target_waitstatus ws;
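  /* Nonzero if the stop is not explained by a breakpoint or watchpoint
     that GDB set; the signal handling tables then decide whether to
     stop or resume.  */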
1763 int random_signal;
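  /* Bounds and name of the function containing stop_pc, as filled in
     by find_pc_partial_function; zero/NULL if unknown.  */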
1764 CORE_ADDR stop_func_start;
1765 CORE_ADDR stop_func_end;
1766 char *stop_func_name;
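  /* Nonzero if the event came from a ptid not yet in GDB's thread
     list.  */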
1767 int new_thread_event;
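  /* Nonzero if the event handler wants the caller to fetch another
     target event before deciding to stop (see prepare_to_wait).  */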
1768 int wait_some_more;
1769 };
1770
1771 static void init_execution_control_state (struct execution_control_state *ecs);
1772
1773 static void handle_inferior_event (struct execution_control_state *ecs);
1774
1775 static void handle_step_into_function (struct gdbarch *gdbarch,
1776 struct execution_control_state *ecs);
1777 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
1778 struct execution_control_state *ecs);
1779 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
1780 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
1781 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
1782 struct symtab_and_line sr_sal,
1783 struct frame_id sr_id);
1784 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
1785
1786 static void stop_stepping (struct execution_control_state *ecs);
1787 static void prepare_to_wait (struct execution_control_state *ecs);
1788 static void keep_going (struct execution_control_state *ecs);
1789 static void print_stop_reason (enum inferior_stop_reason stop_reason,
1790 int stop_info);
1791
1792 /* Callback for iterate over threads. If the thread is stopped, but
1793 the user/frontend doesn't know about that yet, go through
1794 normal_stop, as if the thread had just stopped now. ARG points at
1795 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
1796 ptid_is_pid(PTID) is true, applies to all threads of the process
1797 pointed at by PTID. Otherwise, applies only to the thread pointed at by
1798 PTID. */
1799
1800 static int
1801 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
1802 {
1803 ptid_t ptid = * (ptid_t *) arg;
1804
1805 if ((ptid_equal (info->ptid, ptid)
1806 || ptid_equal (minus_one_ptid, ptid)
1807 || (ptid_is_pid (ptid)
1808 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
1809 && is_running (info->ptid)
1810 && !is_executing (info->ptid))
1811 {
1812 struct cleanup *old_chain;
1813 struct execution_control_state ecss;
1814 struct execution_control_state *ecs = &ecss;
1815
1816 memset (ecs, 0, sizeof (*ecs));
1817
1818 old_chain = make_cleanup_restore_current_thread ();
1819
1820 switch_to_thread (info->ptid);
1821
1822 /* Go through handle_inferior_event/normal_stop, so we always
1823 have consistent output as if the stop event had been
1824 reported. */
1825 ecs->ptid = info->ptid;
1826 ecs->event_thread = find_thread_ptid (info->ptid);
1827 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
1828 ecs->ws.value.sig = TARGET_SIGNAL_0;
1829
1830 handle_inferior_event (ecs);
1831
1832 if (!ecs->wait_some_more)
1833 {
1834 struct thread_info *tp;
1835
1836 normal_stop ();
1837
1838 /* Finish off the continuations. The continuations
1839 themselves are responsible for realising that the thread
1840 didn't finish what it was supposed to do. */
1841 tp = inferior_thread ();
1842 do_all_intermediate_continuations_thread (tp);
1843 do_all_continuations_thread (tp);
1844 }
1845
1846 do_cleanups (old_chain);
1847 }
1848
1849 return 0;
1850 }
1851
1852 /* This function is attached as a "thread_stop_requested" observer.
1853 Cleanup local state that assumed the PTID was to be resumed, and
1854 report the stop to the frontend. */
1855
1856 static void
1857 infrun_thread_stop_requested (ptid_t ptid)
1858 {
1859 struct displaced_step_request *it, *next, *prev = NULL;
1860
1861 /* PTID was requested to stop. Remove it from the displaced
1862 stepping queue, so we don't try to resume it automatically. */
1863 for (it = displaced_step_request_queue; it; it = next)
1864 {
1865 next = it->next;
1866
1867 if (ptid_equal (it->ptid, ptid)
1868 || ptid_equal (minus_one_ptid, ptid)
1869 || (ptid_is_pid (ptid)
1870 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
1871 {
1872 if (displaced_step_request_queue == it)
1873 displaced_step_request_queue = it->next;
1874 else
1875 prev->next = it->next;
1876
1877 xfree (it);
1878 }
1879 else
1880 prev = it;
1881 }
1882
1883 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
1884 }
1885
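/* If the thread GDB last waited on has exited, forget the cached
   last-wait ptid so it isn't used again.  */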
1886 static void
1887 infrun_thread_thread_exit (struct thread_info *tp, int silent)
1888 {
1889 if (ptid_equal (target_last_wait_ptid, tp->ptid))
1890 nullify_last_target_wait_ptid ();
1891 }
1892
1893 /* Callback for iterate_over_threads. */
1894
1895 static int
1896 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
1897 {
1898 if (is_exited (info->ptid))
1899 return 0;
1900
1901 delete_step_resume_breakpoint (info);
1902 return 0;
1903 }
1904
1905 /* In all-stop, delete the step resume breakpoint of any thread that
1906 had one. In non-stop, delete the step resume breakpoint of the
1907 thread that just stopped. */
1908
1909 static void
1910 delete_step_thread_step_resume_breakpoint (void)
1911 {
1912 if (!target_has_execution
1913 || ptid_equal (inferior_ptid, null_ptid))
1914 /* If the inferior has exited, we have already deleted the step
1915 resume breakpoints out of GDB's lists. */
1916 return;
1917
1918 if (non_stop)
1919 {
1920 /* If in non-stop mode, only delete the step-resume or
1921 longjmp-resume breakpoint of the thread that just stopped
1922 stepping. */
1923 struct thread_info *tp = inferior_thread ();
1924 delete_step_resume_breakpoint (tp);
1925 }
1926 else
1927 /* In all-stop mode, delete all step-resume and longjmp-resume
1928 breakpoints of any thread that had them. */
1929 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
1930 }
1931
1932 /* A cleanup wrapper. */
1933
1934 static void
1935 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
1936 {
1937 delete_step_thread_step_resume_breakpoint ();
1938 }
1939
1940 /* Pretty print the results of target_wait, for debugging purposes. */
1941
1942 static void
1943 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
1944 const struct target_waitstatus *ws)
1945 {
1946 char *status_string = target_waitstatus_to_string (ws);
1947 struct ui_file *tmp_stream = mem_fileopen ();
1948 char *text;
1949
1950 /* The text is split over several lines because it was getting too long.
1951 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
1952 output as a unit; we want only one timestamp printed if debug_timestamp
1953 is set. */
1954
1955 fprintf_unfiltered (tmp_stream,
1956 "infrun: target_wait (%d", PIDGET (waiton_ptid));
1957 if (PIDGET (waiton_ptid) != -1)
1958 fprintf_unfiltered (tmp_stream,
1959 " [%s]", target_pid_to_str (waiton_ptid));
1960 fprintf_unfiltered (tmp_stream, ", status) =\n");
1961 fprintf_unfiltered (tmp_stream,
1962 "infrun: %d [%s],\n",
1963 PIDGET (result_ptid), target_pid_to_str (result_ptid));
1964 fprintf_unfiltered (tmp_stream,
1965 "infrun: %s\n",
1966 status_string);
1967
1968 text = ui_file_xstrdup (tmp_stream, NULL);
1969
1970 /* This uses %s in part to handle %'s in the text, but also to avoid
1971 a gcc error: the format attribute requires a string literal. */
1972 fprintf_unfiltered (gdb_stdlog, "%s", text);
1973
1974 xfree (status_string);
1975 xfree (text);
1976 ui_file_delete (tmp_stream);
1977 }
1978
1979 /* Wait for control to return from inferior to debugger.
1980
1981 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
1982 as if they were SIGTRAP signals. This can be useful during
1983 the startup sequence on some targets such as HP/UX, where
1984 we receive an EXEC event instead of the expected SIGTRAP.
1985
1986 If inferior gets a signal, we may decide to start it up again
1987 instead of returning. That is why there is a loop in this function.
1988 When this function actually returns it means the inferior
1989 should be left stopped and GDB should read more commands. */
1990
1991 void
1992 wait_for_inferior (int treat_exec_as_sigtrap)
1993 {
1994 struct cleanup *old_cleanups;
1995 struct execution_control_state ecss;
1996 struct execution_control_state *ecs;
1997
1998 if (debug_infrun)
1999 fprintf_unfiltered
2000 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2001 treat_exec_as_sigtrap);
2002
2003 old_cleanups =
2004 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2005
2006 ecs = &ecss;
2007 memset (ecs, 0, sizeof (*ecs));
2008
2009 /* We'll update this if & when we switch to a new thread. */
2010 previous_inferior_ptid = inferior_ptid;
2011
2012 while (1)
2013 {
2014 struct cleanup *old_chain;
2015
2016 /* We have to invalidate the registers BEFORE calling target_wait
2017 because they can be loaded from the target while in target_wait.
2018 This makes remote debugging a bit more efficient for those
2019 targets that provide critical registers as part of their normal
2020 status mechanism. */
2021
2022 overlay_cache_invalid = 1;
2023 registers_changed ();
2024
2025 if (deprecated_target_wait_hook)
2026 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2027 else
2028 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2029
2030 if (debug_infrun)
2031 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2032
2033 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2034 {
2035 xfree (ecs->ws.value.execd_pathname);
2036 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2037 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2038 }
2039
2040 /* If an error happens while handling the event, propagate GDB's
2041 knowledge of the executing state to the frontend/user running
2042 state. */
2043 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2044
2045 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2046 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2047 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2048
2049 /* Now figure out what to do with the result of the wait. */
2050 handle_inferior_event (ecs);
2051
2052 /* No error, don't finish the state yet. */
2053 discard_cleanups (old_chain);
2054
2055 if (!ecs->wait_some_more)
2056 break;
2057 }
2058
2059 do_cleanups (old_cleanups);
2060 }
2061
2062 /* Asynchronous version of wait_for_inferior. It is called by the
2063 event loop whenever a change of state is detected on the file
2064 descriptor corresponding to the target. It can be called more than
2065 once to complete a single execution command. In such cases we need
2066 to keep the state in a global variable ECSS. If it is the last time
2067 that this function is called for a single execution command, then
2068 report to the user that the inferior has stopped, and do the
2069 necessary cleanups. */
2070
2071 void
2072 fetch_inferior_event (void *client_data)
2073 {
2074 struct execution_control_state ecss;
2075 struct execution_control_state *ecs = &ecss;
2076 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2077 struct cleanup *ts_old_chain;
2078 int was_sync = sync_execution;
2079
2080 memset (ecs, 0, sizeof (*ecs));
2081
2082 /* We'll update this if & when we switch to a new thread. */
2083 previous_inferior_ptid = inferior_ptid;
2084
2085 if (non_stop)
2086 /* In non-stop mode, the user/frontend should not notice a thread
2087 switch due to internal events. Make sure we reverse to the
2088 user selected thread and frame after handling the event and
2089 running any breakpoint commands. */
2090 make_cleanup_restore_current_thread ();
2091
2092 /* We have to invalidate the registers BEFORE calling target_wait
2093 because they can be loaded from the target while in target_wait.
2094 This makes remote debugging a bit more efficient for those
2095 targets that provide critical registers as part of their normal
2096 status mechanism. */
2097
2098 overlay_cache_invalid = 1;
2099 registers_changed ();
2100
2101 if (deprecated_target_wait_hook)
2102 ecs->ptid =
2103 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2104 else
2105 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2106
2107 if (debug_infrun)
2108 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2109
2110 if (non_stop
2111 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2112 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2113 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2114 /* In non-stop mode, each thread is handled individually. Switch
2115 early, so the global state is set correctly for this
2116 thread. */
2117 context_switch (ecs->ptid);
2118
2119 /* If an error happens while handling the event, propagate GDB's
2120 knowledge of the executing state to the frontend/user running
2121 state. */
2122 if (!non_stop)
2123 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2124 else
2125 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2126
2127 /* Now figure out what to do with the result of the wait. */
2128 handle_inferior_event (ecs);
2129
2130 if (!ecs->wait_some_more)
2131 {
2132 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2133
2134 delete_step_thread_step_resume_breakpoint ();
2135
2136 /* We may not find an inferior if this was a process exit. */
2137 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2138 normal_stop ();
2139
2140 if (target_has_execution
2141 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2142 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2143 && ecs->event_thread->step_multi
2144 && ecs->event_thread->stop_step)
2145 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2146 else
2147 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2148 }
2149
2150 /* No error, don't finish the thread states yet. */
2151 discard_cleanups (ts_old_chain);
2152
2153 /* Revert thread and frame. */
2154 do_cleanups (old_chain);
2155
2156 /* If the inferior was in sync execution mode, and now isn't,
2157 restore the prompt. */
2158 if (was_sync && !sync_execution)
2159 display_gdb_prompt (0);
2160 }
2161
2162 /* Record the frame and location we're currently stepping through. */
2163 void
2164 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2165 {
2166 struct thread_info *tp = inferior_thread ();
2167
2168 tp->step_frame_id = get_frame_id (frame);
2169 tp->step_stack_frame_id = get_stack_frame_id (frame);
2170
2171 tp->current_symtab = sal.symtab;
2172 tp->current_line = sal.line;
2173 }
2174
2175 /* Prepare an execution control state for looping through a
2176 wait_for_inferior-type loop. */
2177
2178 static void
2179 init_execution_control_state (struct execution_control_state *ecs)
2180 {
2181 ecs->random_signal = 0;
2182 }
2183
2184 /* Clear context switchable stepping state. */
2185
2186 void
2187 init_thread_stepping_state (struct thread_info *tss)
2188 {
2189 tss->stepping_over_breakpoint = 0;
2190 tss->step_after_step_resume_breakpoint = 0;
2191 tss->stepping_through_solib_after_catch = 0;
2192 tss->stepping_through_solib_catchpoints = NULL;
2193 }
2194
2195 /* Return the cached copy of the last pid/waitstatus returned by
2196 target_wait()/deprecated_target_wait_hook(). The data is actually
2197 cached by handle_inferior_event(), which gets called immediately
2198 after target_wait()/deprecated_target_wait_hook(). */
2199
2200 void
2201 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2202 {
2203 *ptidp = target_last_wait_ptid;
2204 *status = target_last_waitstatus;
2205 }
2206
2207 void
2208 nullify_last_target_wait_ptid (void)
2209 {
2210 target_last_wait_ptid = minus_one_ptid;
2211 }
2212
2213 /* Switch thread contexts. */
2214
2215 static void
2216 context_switch (ptid_t ptid)
2217 {
2218 if (debug_infrun)
2219 {
2220 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2221 target_pid_to_str (inferior_ptid));
2222 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2223 target_pid_to_str (ptid));
2224 }
2225
2226 switch_to_thread (ptid);
2227 }
2228
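/* If the inferior stopped at a software breakpoint, back the PC up to
   the breakpoint address, compensating for the architecture's
   decr_pc_after_break.  See the comments in the body for the cases in
   which no adjustment is made.  */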
2229 static void
2230 adjust_pc_after_break (struct execution_control_state *ecs)
2231 {
2232 struct regcache *regcache;
2233 struct gdbarch *gdbarch;
2234 CORE_ADDR breakpoint_pc;
2235
2236 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2237 we aren't, just return.
2238
2239 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2240 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2241 implemented by software breakpoints should be handled through the normal
2242 breakpoint layer.
2243
2244 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2245 different signals (SIGILL or SIGEMT for instance), but it is less
2246 clear where the PC is pointing afterwards. It may not match
2247 gdbarch_decr_pc_after_break. I don't know any specific target that
2248 generates these signals at breakpoints (the code has been in GDB since at
2249 least 1992) so I can not guess how to handle them here.
2250
2251 In earlier versions of GDB, a target with
2252 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2253 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2254 target with both of these set in GDB history, and it seems unlikely to be
2255 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2256
2257 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2258 return;
2259
2260 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2261 return;
2262
2263 /* In reverse execution, when a breakpoint is hit, the instruction
2264 under it has already been de-executed. The reported PC always
2265 points at the breakpoint address, so adjusting it further would
2266 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2267 architecture:
2268
2269 B1 0x08000000 : INSN1
2270 B2 0x08000001 : INSN2
2271 0x08000002 : INSN3
2272 PC -> 0x08000003 : INSN4
2273
2274 Say you're stopped at 0x08000003 as above. Reverse continuing
2275 from that point should hit B2 as below. Reading the PC when the
2276 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2277 been de-executed already.
2278
2279 B1 0x08000000 : INSN1
2280 B2 PC -> 0x08000001 : INSN2
2281 0x08000002 : INSN3
2282 0x08000003 : INSN4
2283
2284 We can't apply the same logic as for forward execution, because
2285 we would wrongly adjust the PC to 0x08000000, since there's a
2286 breakpoint at PC - 1. We'd then report a hit on B1, although
2287 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2288 behaviour. */
2289 if (execution_direction == EXEC_REVERSE)
2290 return;
2291
2292 /* If this target does not decrement the PC after breakpoints, then
2293 we have nothing to do. */
2294 regcache = get_thread_regcache (ecs->ptid);
2295 gdbarch = get_regcache_arch (regcache);
2296 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2297 return;
2298
2299 /* Find the location where (if we've hit a breakpoint) the
2300 breakpoint would be. */
2301 breakpoint_pc = regcache_read_pc (regcache)
2302 - gdbarch_decr_pc_after_break (gdbarch);
2303
2304 /* Check whether there actually is a software breakpoint inserted at
2305 that location.
2306
2307 If in non-stop mode, a race condition is possible where we've
2308 removed a breakpoint, but stop events for that breakpoint were
2309 already queued and arrive later. To suppress those spurious
2310 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2311 and retire them after a number of stop events are reported. */
2312 if (software_breakpoint_inserted_here_p (breakpoint_pc)
2313 || (non_stop && moribund_breakpoint_here_p (breakpoint_pc)))
2314 {
2315 struct cleanup *old_cleanups = NULL;
2316 if (RECORD_IS_USED)
2317 old_cleanups = record_gdb_operation_disable_set ();
2318
2319 /* When using hardware single-step, a SIGTRAP is reported for both
2320 a completed single-step and a software breakpoint. Need to
2321 differentiate between the two, as the latter needs adjusting
2322 but the former does not.
2323
2324 The SIGTRAP can be due to a completed hardware single-step only if
2325 - we didn't insert software single-step breakpoints
2326 - the thread to be examined is still the current thread
2327 - this thread is currently being stepped
2328
2329 If any of these events did not occur, we must have stopped due
2330 to hitting a software breakpoint, and have to back up to the
2331 breakpoint address.
2332
2333 As a special case, we could have hardware single-stepped a
2334 software breakpoint. In this case (prev_pc == breakpoint_pc),
2335 we also need to back up to the breakpoint address. */
2336
2337 if (singlestep_breakpoints_inserted_p
2338 || !ptid_equal (ecs->ptid, inferior_ptid)
2339 || !currently_stepping (ecs->event_thread)
2340 || ecs->event_thread->prev_pc == breakpoint_pc)
2341 regcache_write_pc (regcache, breakpoint_pc);
2342
2343 if (RECORD_IS_USED)
2344 do_cleanups (old_cleanups);
2345 }
2346 }
2347
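/* Reset the inferior-wait state: wait on any ptid, with no special
   handling pending.  */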
2348 void
2349 init_infwait_state (void)
2350 {
2351 waiton_ptid = pid_to_ptid (-1);
2352 infwait_state = infwait_normal_state;
2353 }
2354
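/* Throw an error complaining that the selected thread is running.  */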
2355 void
2356 error_is_running (void)
2357 {
2358 error (_("\
2359 Cannot execute this command while the selected thread is running."));
2360 }
2361
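/* Error out if the currently selected thread is running.  */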
2362 void
2363 ensure_not_running (void)
2364 {
2365 if (is_running (inferior_ptid))
2366 error_is_running ();
2367 }
2368
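/* Return 1 if, walking backwards from FRAME through only inline
   frames, we reach a frame whose id equals STEP_FRAME_ID; return 0
   otherwise.  In other words, whether FRAME was reached by stepping
   into inlined code from the frame identified by STEP_FRAME_ID.  */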
2369 static int
2370 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2371 {
2372 for (frame = get_prev_frame (frame);
2373 frame != NULL;
2374 frame = get_prev_frame (frame))
2375 {
2376 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2377 return 1;
2378 if (get_frame_type (frame) != INLINE_FRAME)
2379 break;
2380 }
2381
2382 return 0;
2383 }
2384
2385 /* Auxiliary function that handles syscall entry/return events.
2386 It returns 1 if the inferior should keep going (and GDB
2387 should ignore the event), or 0 if the event deserves to be
2388 processed. */
2389
2390 static int
2391 handle_syscall_event (struct execution_control_state *ecs)
2392 {
2393 struct regcache *regcache;
2394 struct gdbarch *gdbarch;
2395 int syscall_number;
2396
2397 if (!ptid_equal (ecs->ptid, inferior_ptid))
2398 context_switch (ecs->ptid);
2399
2400 regcache = get_thread_regcache (ecs->ptid);
2401 gdbarch = get_regcache_arch (regcache);
2402 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2403 stop_pc = regcache_read_pc (regcache);
2404
2405 target_last_waitstatus.value.syscall_number = syscall_number;
2406
2407 if (catch_syscall_enabled () > 0
2408 && catching_syscall_number (syscall_number) > 0)
2409 {
2410 if (debug_infrun)
2411 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2412 syscall_number);
2413
2414 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2415 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2416
2417 if (!ecs->random_signal)
2418 {
2419 /* Catchpoint hit. */
2420 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2421 return 0;
2422 }
2423 }
2424
2425 /* If no catchpoint triggered for this, then keep going. */
2426 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2427 keep_going (ecs);
2428 return 1;
2429 }
2430
2431 /* Given an execution control state that has been freshly filled in
2432 by an event from the inferior, figure out what it means and take
2433 appropriate action. */
2434
2435 static void
2436 handle_inferior_event (struct execution_control_state *ecs)
2437 {
2438 struct frame_info *frame;
2439 struct gdbarch *gdbarch;
2440 int sw_single_step_trap_p = 0;
2441 int stopped_by_watchpoint;
2442 int stepped_after_stopped_by_watchpoint = 0;
2443 struct symtab_and_line stop_pc_sal;
2444 enum stop_kind stop_soon;
2445
2446 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2447 {
2448 /* We had an event in the inferior, but we are not interested in
2449 handling it at this level. The lower layers have already
2450 done what needs to be done, if anything.
2451
2452 One of the possible circumstances for this is when the
2453 inferior produces output for the console. The inferior has
2454 not stopped, and we are ignoring the event. Another possible
2455 circumstance is any event which the lower level knows will be
2456 reported multiple times without an intervening resume. */
2457 if (debug_infrun)
2458 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2459 prepare_to_wait (ecs);
2460 return;
2461 }
2462
2463 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2464 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2465 {
2466 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2467 gdb_assert (inf);
2468 stop_soon = inf->stop_soon;
2469 }
2470 else
2471 stop_soon = NO_STOP_QUIETLY;
2472
2473 /* Cache the last pid/waitstatus. */
2474 target_last_wait_ptid = ecs->ptid;
2475 target_last_waitstatus = ecs->ws;
2476
2477 /* Always clear state belonging to the previous time we stopped. */
2478 stop_stack_dummy = 0;
2479
2480 /* If it's a new process, add it to the thread database */
2481
2482 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2483 && !ptid_equal (ecs->ptid, minus_one_ptid)
2484 && !in_thread_list (ecs->ptid));
2485
2486 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2487 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2488 add_thread (ecs->ptid);
2489
2490 ecs->event_thread = find_thread_ptid (ecs->ptid);
2491
2492 /* Dependent on valid ECS->EVENT_THREAD. */
2493 adjust_pc_after_break (ecs);
2494
2495 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2496 reinit_frame_cache ();
2497
2498 breakpoint_retire_moribund ();
2499
2500 /* Mark the non-executing threads accordingly. In all-stop, all
2501 threads of all processes are stopped when we get any event
2502 reported. In non-stop mode, only the event thread stops. If
2503 we're handling a process exit in non-stop mode, there's nothing
2504 to do, as threads of the dead process are gone, and threads of
2505 any other process were left running. */
2506 if (!non_stop)
2507 set_executing (minus_one_ptid, 0);
2508 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2509 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2510 set_executing (inferior_ptid, 0);
2511
2512 switch (infwait_state)
2513 {
2514 case infwait_thread_hop_state:
2515 if (debug_infrun)
2516 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2517 break;
2518
2519 case infwait_normal_state:
2520 if (debug_infrun)
2521 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2522 break;
2523
2524 case infwait_step_watch_state:
2525 if (debug_infrun)
2526 fprintf_unfiltered (gdb_stdlog,
2527 "infrun: infwait_step_watch_state\n");
2528
2529 stepped_after_stopped_by_watchpoint = 1;
2530 break;
2531
2532 case infwait_nonstep_watch_state:
2533 if (debug_infrun)
2534 fprintf_unfiltered (gdb_stdlog,
2535 "infrun: infwait_nonstep_watch_state\n");
2536 insert_breakpoints ();
2537
2538 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2539 handle things like signals arriving and other things happening
2540 in combination correctly? */
2541 stepped_after_stopped_by_watchpoint = 1;
2542 break;
2543
2544 default:
2545 internal_error (__FILE__, __LINE__, _("bad switch"));
2546 }
2547
2548 infwait_state = infwait_normal_state;
2549 waiton_ptid = pid_to_ptid (-1);
2550
2551 switch (ecs->ws.kind)
2552 {
2553 case TARGET_WAITKIND_LOADED:
2554 if (debug_infrun)
2555 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2556 /* Ignore gracefully during startup of the inferior, as it might
2557 be the shell which has just loaded some objects, otherwise
2558 add the symbols for the newly loaded objects. Also ignore at
2559 the beginning of an attach or remote session; we will query
2560 the full list of libraries once the connection is
2561 established. */
2562 if (stop_soon == NO_STOP_QUIETLY)
2563 {
2564 /* Check for any newly added shared libraries if we're
2565 supposed to be adding them automatically. Switch
2566 terminal for any messages produced by
2567 breakpoint_re_set. */
2568 target_terminal_ours_for_output ();
2569 /* NOTE: cagney/2003-11-25: Make certain that the target
2570 stack's section table is kept up-to-date. Architectures,
2571 (e.g., PPC64), use the section table to perform
2572 operations such as address => section name and hence
2573 require the table to contain all sections (including
2574 those found in shared libraries). */
2575 #ifdef SOLIB_ADD
2576 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2577 #else
2578 solib_add (NULL, 0, &current_target, auto_solib_add);
2579 #endif
2580 target_terminal_inferior ();
2581
2582 /* If requested, stop when the dynamic linker notifies
2583 gdb of events. This allows the user to get control
2584 and place breakpoints in initializer routines for
2585 dynamically loaded objects (among other things). */
2586 if (stop_on_solib_events)
2587 {
2588 stop_stepping (ecs);
2589 return;
2590 }
2591
2592 /* NOTE drow/2007-05-11: This might be a good place to check
2593 for "catch load". */
2594 }
2595
2596 /* If we are skipping through a shell, or through shared library
2597 loading that we aren't interested in, resume the program. If
2598 we're running the program normally, also resume. But stop if
2599 we're attaching or setting up a remote connection. */
2600 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2601 {
2602 /* Loading of shared libraries might have changed breakpoint
2603 addresses. Make sure new breakpoints are inserted. */
2604 if (stop_soon == NO_STOP_QUIETLY
2605 && !breakpoints_always_inserted_mode ())
2606 insert_breakpoints ();
2607 resume (0, TARGET_SIGNAL_0);
2608 prepare_to_wait (ecs);
2609 return;
2610 }
2611
2612 break;
2613
2614 case TARGET_WAITKIND_SPURIOUS:
2615 if (debug_infrun)
2616 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2617 resume (0, TARGET_SIGNAL_0);
2618 prepare_to_wait (ecs);
2619 return;
2620
2621 case TARGET_WAITKIND_EXITED:
2622 if (debug_infrun)
2623 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2624 inferior_ptid = ecs->ptid;
2625 target_terminal_ours (); /* Must do this before mourn anyway */
2626 print_stop_reason (EXITED, ecs->ws.value.integer);
2627
2628 /* Record the exit code in the convenience variable $_exitcode, so
2629 that the user can inspect this again later. */
2630 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2631 (LONGEST) ecs->ws.value.integer);
2632 gdb_flush (gdb_stdout);
2633 target_mourn_inferior ();
2634 singlestep_breakpoints_inserted_p = 0;
2635 stop_print_frame = 0;
2636 stop_stepping (ecs);
2637 return;
2638
2639 case TARGET_WAITKIND_SIGNALLED:
2640 if (debug_infrun)
2641 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2642 inferior_ptid = ecs->ptid;
2643 stop_print_frame = 0;
2644 target_terminal_ours (); /* Must do this before mourn anyway */
2645
2646 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2647 reach here unless the inferior is dead. However, for years
2648 target_kill() was called here, which hints that fatal signals aren't
2649 really fatal on some systems. If that's true, then some changes
2650 may be needed. */
2651 target_mourn_inferior ();
2652
2653 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2654 singlestep_breakpoints_inserted_p = 0;
2655 stop_stepping (ecs);
2656 return;
2657
2658 /* The following are the only cases in which we keep going;
2659 the above cases end in a continue or goto. */
2660 case TARGET_WAITKIND_FORKED:
2661 case TARGET_WAITKIND_VFORKED:
2662 if (debug_infrun)
2663 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2664
2665 if (!ptid_equal (ecs->ptid, inferior_ptid))
2666 {
2667 context_switch (ecs->ptid);
2668 reinit_frame_cache ();
2669 }
2670
2671 /* Immediately detach breakpoints from the child before there's
2672 any chance of letting the user delete breakpoints from the
2673 breakpoint lists. If we don't do this early, it's easy to
2674 leave leftover traps in the child, viz: "break foo; catch
2675 fork; c; <fork>; del; c; <child calls foo>". We only follow
2676 the fork on the last `continue', and by that time the
2677 breakpoint at "foo" is long gone from the breakpoint table.
2678 If we vforked, then we don't need to unpatch here, since both
2679 parent and child are sharing the same memory pages; we'll
2680 need to unpatch at follow/detach time instead to be certain
2681 that new breakpoints added between catchpoint hit time and
2682 vfork follow are detached. */
2683 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2684 {
2685 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2686
2687 /* This won't actually modify the breakpoint list, but will
2688 physically remove the breakpoints from the child. */
2689 detach_breakpoints (child_pid);
2690 }
2691
2692 /* In case the event is caught by a catchpoint, remember that
2693 the event is to be followed at the next resume of the thread,
2694 and not immediately. */
2695 ecs->event_thread->pending_follow = ecs->ws;
2696
2697 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2698
2699 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2700
2701 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2702
2703 /* If no catchpoint triggered for this, then keep going. */
2704 if (ecs->random_signal)
2705 {
2706 int should_resume;
2707
2708 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2709
2710 should_resume = follow_fork ();
2711
2712 ecs->event_thread = inferior_thread ();
2713 ecs->ptid = inferior_ptid;
2714
2715 if (should_resume)
2716 keep_going (ecs);
2717 else
2718 stop_stepping (ecs);
2719 return;
2720 }
2721 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2722 goto process_event_stop_test;
2723
2724 case TARGET_WAITKIND_EXECD:
2725 if (debug_infrun)
2726 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
2727
2728 if (!ptid_equal (ecs->ptid, inferior_ptid))
2729 {
2730 context_switch (ecs->ptid);
2731 reinit_frame_cache ();
2732 }
2733
2734 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2735
2736 /* This causes the eventpoints and symbol table to be reset.
2737 Must do this now, before trying to determine whether to
2738 stop. */
2739 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
2740
2741 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2742 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2743
2744 /* Note that this may be referenced from inside
2745 bpstat_stop_status above, through inferior_has_execd. */
2746 xfree (ecs->ws.value.execd_pathname);
2747 ecs->ws.value.execd_pathname = NULL;
2748
2749 /* If no catchpoint triggered for this, then keep going. */
2750 if (ecs->random_signal)
2751 {
2752 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2753 keep_going (ecs);
2754 return;
2755 }
2756 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2757 goto process_event_stop_test;
2758
2759 /* Be careful not to try to gather much state about a thread
2760 that's in a syscall. It's frequently a losing proposition. */
2761 case TARGET_WAITKIND_SYSCALL_ENTRY:
2762 if (debug_infrun)
2763 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
2764 /* Get the current syscall number.  */
2765 if (handle_syscall_event (ecs) != 0)
2766 return;
2767 goto process_event_stop_test;
2768
2769 /* Before examining the threads further, step this thread to
2770 get it entirely out of the syscall. (We get notice of the
2771 event when the thread is just on the verge of exiting a
2772 syscall. Stepping one instruction seems to get it back
2773 into user code.) */
2774 case TARGET_WAITKIND_SYSCALL_RETURN:
2775 if (debug_infrun)
2776 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
2777 if (handle_syscall_event (ecs) != 0)
2778 return;
2779 goto process_event_stop_test;
2780
2781 case TARGET_WAITKIND_STOPPED:
2782 if (debug_infrun)
2783 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
2784 ecs->event_thread->stop_signal = ecs->ws.value.sig;
2785 break;
2786
2787 case TARGET_WAITKIND_NO_HISTORY:
2788 /* Reverse execution: target ran out of history info. */
2789 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2790 print_stop_reason (NO_HISTORY, 0);
2791 stop_stepping (ecs);
2792 return;
2793 }
2794
2795 if (ecs->new_thread_event)
2796 {
2797 if (non_stop)
2798 /* Non-stop assumes that the target handles adding new threads
2799 to the thread list. */
2800 internal_error (__FILE__, __LINE__, "\
2801 targets should add new threads to the thread list themselves in non-stop mode.");
2802
2803 /* We may want to consider not doing a resume here in order to
2804 give the user a chance to play with the new thread. It might
2805 be good to make that a user-settable option. */
2806
2807 /* At this point, all threads are stopped (happens automatically
2808 in either the OS or the native code). Therefore we need to
2809 continue all threads in order to make progress. */
2810
2811 if (!ptid_equal (ecs->ptid, inferior_ptid))
2812 context_switch (ecs->ptid);
2813 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
2814 prepare_to_wait (ecs);
2815 return;
2816 }
2817
2818 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
2819 {
2820 /* Do we need to clean up the state of a thread that has
2821 completed a displaced single-step? (Doing so usually affects
2822 the PC, so do it here, before we set stop_pc.) */
2823 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
2824
2825 /* If we either finished a single-step or hit a breakpoint, but
2826 the user wanted this thread to be stopped, pretend we got a
2827 SIG0 (generic unsignaled stop). */
2828
2829 if (ecs->event_thread->stop_requested
2830 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2831 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2832 }
2833
2834 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2835
2836 if (debug_infrun)
2837 {
2838 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2839 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2840
2841 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
2842 paddress (gdbarch, stop_pc));
2843 if (target_stopped_by_watchpoint ())
2844 {
2845 CORE_ADDR addr;
2846 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
2847
2848 if (target_stopped_data_address (&current_target, &addr))
2849 fprintf_unfiltered (gdb_stdlog,
2850 "infrun: stopped data address = %s\n",
2851 paddress (gdbarch, addr));
2852 else
2853 fprintf_unfiltered (gdb_stdlog,
2854 "infrun: (no data address available)\n");
2855 }
2856 }
2857
2858 if (stepping_past_singlestep_breakpoint)
2859 {
2860 gdb_assert (singlestep_breakpoints_inserted_p);
2861 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
2862 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
2863
2864 stepping_past_singlestep_breakpoint = 0;
2865
2866 /* We've either finished single-stepping past the single-step
2867 breakpoint, or stopped for some other reason. It would be nice if
2868 we could tell, but we can't reliably. */
2869 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2870 {
2871 if (debug_infrun)
2872 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
2873 /* Pull the single step breakpoints out of the target. */
2874 remove_single_step_breakpoints ();
2875 singlestep_breakpoints_inserted_p = 0;
2876
2877 ecs->random_signal = 0;
2878 ecs->event_thread->trap_expected = 0;
2879
2880 context_switch (saved_singlestep_ptid);
2881 if (deprecated_context_hook)
2882 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
2883
2884 resume (1, TARGET_SIGNAL_0);
2885 prepare_to_wait (ecs);
2886 return;
2887 }
2888 }
2889
2890 if (!ptid_equal (deferred_step_ptid, null_ptid))
2891 {
2892 /* In non-stop mode, there's never a deferred_step_ptid set. */
2893 gdb_assert (!non_stop);
2894
2895 /* If we stopped for some other reason than single-stepping, ignore
2896 the fact that we were supposed to switch back. */
2897 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2898 {
2899 if (debug_infrun)
2900 fprintf_unfiltered (gdb_stdlog,
2901 "infrun: handling deferred step\n");
2902
2903 /* Pull the single step breakpoints out of the target. */
2904 if (singlestep_breakpoints_inserted_p)
2905 {
2906 remove_single_step_breakpoints ();
2907 singlestep_breakpoints_inserted_p = 0;
2908 }
2909
2910 /* Note: We do not call context_switch at this point, as the
2911 context is already set up for stepping the original thread. */
2912 switch_to_thread (deferred_step_ptid);
2913 deferred_step_ptid = null_ptid;
2914 /* Suppress spurious "Switching to ..." message. */
2915 previous_inferior_ptid = inferior_ptid;
2916
2917 resume (1, TARGET_SIGNAL_0);
2918 prepare_to_wait (ecs);
2919 return;
2920 }
2921
2922 deferred_step_ptid = null_ptid;
2923 }
2924
2925 /* See if a thread hit a thread-specific breakpoint that was meant for
2926 another thread. If so, then step that thread past the breakpoint,
2927 and continue it. */
2928
2929 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2930 {
2931 int thread_hop_needed = 0;
2932
2933 /* Check if a regular breakpoint has been hit before checking
2934 for a potential single step breakpoint. Otherwise, GDB will
2935 not see this breakpoint hit when stepping onto breakpoints. */
2936 if (regular_breakpoint_inserted_here_p (stop_pc))
2937 {
2938 ecs->random_signal = 0;
2939 if (!breakpoint_thread_match (stop_pc, ecs->ptid))
2940 thread_hop_needed = 1;
2941 }
2942 else if (singlestep_breakpoints_inserted_p)
2943 {
2944 /* We have not context switched yet, so this should be true
2945 no matter which thread hit the singlestep breakpoint. */
2946 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
2947 if (debug_infrun)
2948 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
2949 "trap for %s\n",
2950 target_pid_to_str (ecs->ptid));
2951
2952 ecs->random_signal = 0;
2953 /* The call to in_thread_list is necessary because PTIDs sometimes
2954 change when we go from single-threaded to multi-threaded. If
2955 the singlestep_ptid is still in the list, assume that it is
2956 really different from ecs->ptid. */
2957 if (!ptid_equal (singlestep_ptid, ecs->ptid)
2958 && in_thread_list (singlestep_ptid))
2959 {
2960 /* If the PC of the thread we were trying to single-step
2961 has changed, discard this event (which we were going
2962 to ignore anyway), and pretend we saw that thread
2963 trap. This prevents us continuously moving the
2964 single-step breakpoint forward, one instruction at a
2965 time. If the PC has changed, then the thread we were
2966 trying to single-step has trapped or been signalled,
2967 but the event has not been reported to GDB yet.
2968
2969 There might be some cases where this loses signal
2970 information, if a signal has arrived at exactly the
2971 same time that the PC changed, but this is the best
2972 we can do with the information available. Perhaps we
2973 should arrange to report all events for all threads
2974 when they stop, or to re-poll the remote looking for
2975 this particular thread (i.e. temporarily enable
2976 schedlock). */
2977
2978 CORE_ADDR new_singlestep_pc
2979 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
2980
2981 if (new_singlestep_pc != singlestep_pc)
2982 {
2983 enum target_signal stop_signal;
2984
2985 if (debug_infrun)
2986 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
2987 " but expected thread advanced also\n");
2988
2989 /* The current context still belongs to
2990 singlestep_ptid. Don't swap here, since that's
2991 the context we want to use. Just fudge our
2992 state and continue. */
2993 stop_signal = ecs->event_thread->stop_signal;
2994 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2995 ecs->ptid = singlestep_ptid;
2996 ecs->event_thread = find_thread_ptid (ecs->ptid);
2997 ecs->event_thread->stop_signal = stop_signal;
2998 stop_pc = new_singlestep_pc;
2999 }
3000 else
3001 {
3002 if (debug_infrun)
3003 fprintf_unfiltered (gdb_stdlog,
3004 "infrun: unexpected thread\n");
3005
3006 thread_hop_needed = 1;
3007 stepping_past_singlestep_breakpoint = 1;
3008 saved_singlestep_ptid = singlestep_ptid;
3009 }
3010 }
3011 }
3012
3013 if (thread_hop_needed)
3014 {
3015 struct regcache *thread_regcache;
3016 int remove_status = 0;
3017
3018 if (debug_infrun)
3019 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3020
3021 /* Switch context before touching inferior memory, the
3022 previous thread may have exited. */
3023 if (!ptid_equal (inferior_ptid, ecs->ptid))
3024 context_switch (ecs->ptid);
3025
3026 /* Saw a breakpoint, but it was hit by the wrong thread.
3027 Just continue. */
3028
3029 if (singlestep_breakpoints_inserted_p)
3030 {
3031 /* Pull the single step breakpoints out of the target. */
3032 remove_single_step_breakpoints ();
3033 singlestep_breakpoints_inserted_p = 0;
3034 }
3035
3036 /* If the arch can displace step, don't remove the
3037 breakpoints. */
3038 thread_regcache = get_thread_regcache (ecs->ptid);
3039 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3040 remove_status = remove_breakpoints ();
3041
3042 /* Did we fail to remove breakpoints? If so, try
3043 to set the PC past the bp. (There's at least
3044 one situation in which we can fail to remove
3045 the bp's: On HP-UX's that use ttrace, we can't
3046 change the address space of a vforking child
3047 process until the child exits (well, okay, not
3048 then either :-) or execs.)  */
3049 if (remove_status != 0)
3050 error (_("Cannot step over breakpoint hit in wrong thread"));
3051 else
3052 { /* Single step */
3053 if (!non_stop)
3054 {
3055 /* Only need to require the next event from this
3056 thread in all-stop mode. */
3057 waiton_ptid = ecs->ptid;
3058 infwait_state = infwait_thread_hop_state;
3059 }
3060
3061 ecs->event_thread->stepping_over_breakpoint = 1;
3062 keep_going (ecs);
3063 return;
3064 }
3065 }
3066 else if (singlestep_breakpoints_inserted_p)
3067 {
3068 sw_single_step_trap_p = 1;
3069 ecs->random_signal = 0;
3070 }
3071 }
3072 else
3073 ecs->random_signal = 1;
3074
3075 /* See if something interesting happened to the non-current thread. If
3076 so, then switch to that thread. */
3077 if (!ptid_equal (ecs->ptid, inferior_ptid))
3078 {
3079 if (debug_infrun)
3080 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3081
3082 context_switch (ecs->ptid);
3083
3084 if (deprecated_context_hook)
3085 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3086 }
3087
3088 /* At this point, get hold of the now-current thread's frame. */
3089 frame = get_current_frame ();
3090 gdbarch = get_frame_arch (frame);
3091
3092 if (singlestep_breakpoints_inserted_p)
3093 {
3094 /* Pull the single step breakpoints out of the target. */
3095 remove_single_step_breakpoints ();
3096 singlestep_breakpoints_inserted_p = 0;
3097 }
3098
3099 if (stepped_after_stopped_by_watchpoint)
3100 stopped_by_watchpoint = 0;
3101 else
3102 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3103
3104 /* If necessary, step over this watchpoint. We'll be back to display
3105 it in a moment. */
3106 if (stopped_by_watchpoint
3107 && (target_have_steppable_watchpoint
3108 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3109 {
3110 /* At this point, we are stopped at an instruction which has
3111 attempted to write to a piece of memory under control of
3112 a watchpoint. The instruction hasn't actually executed
3113 yet. If we were to evaluate the watchpoint expression
3114 now, we would get the old value, and therefore no change
3115 would seem to have occurred.
3116
3117 In order to make watchpoints work `right', we really need
3118 to complete the memory write, and then evaluate the
3119 watchpoint expression. We do this by single-stepping the
3120 target.
3121
3122 It may not be necessary to disable the watchpoint to step over
3123 it. For example, the PA can (with some kernel cooperation)
3124 single step over a watchpoint without disabling the watchpoint.
3125
3126 It is far more common to need to disable a watchpoint to step
3127 the inferior over it. If we have non-steppable watchpoints,
3128 we must disable the current watchpoint; it's simplest to
3129 disable all watchpoints and breakpoints. */
3130 int hw_step = 1;
3131
3132 if (!target_have_steppable_watchpoint)
3133 remove_breakpoints ();
3134 /* Single step */
3135 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3136 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3137 waiton_ptid = ecs->ptid;
3138 if (target_have_steppable_watchpoint)
3139 infwait_state = infwait_step_watch_state;
3140 else
3141 infwait_state = infwait_nonstep_watch_state;
3142 prepare_to_wait (ecs);
3143 return;
3144 }
3145
3146 ecs->stop_func_start = 0;
3147 ecs->stop_func_end = 0;
3148 ecs->stop_func_name = 0;
3149 /* Don't care about return value; stop_func_start and stop_func_name
3150 will both be 0 if it doesn't work. */
3151 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3152 &ecs->stop_func_start, &ecs->stop_func_end);
3153 ecs->stop_func_start
3154 += gdbarch_deprecated_function_start_offset (gdbarch);
3155 ecs->event_thread->stepping_over_breakpoint = 0;
3156 bpstat_clear (&ecs->event_thread->stop_bpstat);
3157 ecs->event_thread->stop_step = 0;
3158 stop_print_frame = 1;
3159 ecs->random_signal = 0;
3160 stopped_by_random_signal = 0;
3161
3162 /* Hide inlined functions starting here, unless we just performed stepi or
3163 nexti. After stepi and nexti, always show the innermost frame (not any
3164 inline function call sites). */
3165 if (ecs->event_thread->step_range_end != 1)
3166 skip_inline_frames (ecs->ptid);
3167
3168 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3169 && ecs->event_thread->trap_expected
3170 && gdbarch_single_step_through_delay_p (gdbarch)
3171 && currently_stepping (ecs->event_thread))
3172 {
3173 /* We're trying to step off a breakpoint. Turns out that we're
3174 also on an instruction that needs to be stepped multiple
3175 times before it has fully executed. E.g., architectures
3176 with a delay slot. It needs to be stepped twice, once for
3177 the instruction and once for the delay slot. */
3178 int step_through_delay
3179 = gdbarch_single_step_through_delay (gdbarch, frame);
3180 if (debug_infrun && step_through_delay)
3181 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3182 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3183 {
3184 /* The user issued a continue when stopped at a breakpoint.
3185 Set up for another trap and get out of here. */
3186 ecs->event_thread->stepping_over_breakpoint = 1;
3187 keep_going (ecs);
3188 return;
3189 }
3190 else if (step_through_delay)
3191 {
3192 /* The user issued a step when stopped at a breakpoint.
3193 Maybe we should stop, maybe we should not - the delay
3194 slot *might* correspond to a line of source. In any
3195 case, don't decide that here, just set
3196 ecs->stepping_over_breakpoint, making sure we
3197 single-step again before breakpoints are re-inserted. */
3198 ecs->event_thread->stepping_over_breakpoint = 1;
3199 }
3200 }
3201
3202 /* Look at the cause of the stop, and decide what to do.
3203 The alternatives are:
3204 1) stop_stepping and return; to really stop and return to the debugger,
3205 2) keep_going and return to start up again
3206 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3207 3) set ecs->random_signal to 1, and the decision between 1 and 2
3208 will be made according to the signal handling tables. */
3209
3210 /* First, distinguish signals caused by the debugger from signals
3211 that have to do with the program's own actions. Note that
3212 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3213 on the operating system version. Here we detect when a SIGILL or
3214 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3215 something similar for SIGSEGV, since a SIGSEGV will be generated
3216 when we're trying to execute a breakpoint instruction on a
3217 non-executable stack. This happens for call dummy breakpoints
3218 for architectures like SPARC that place call dummies on the
3219 stack.
3220
3221 If we're doing a displaced step past a breakpoint, then the
3222 breakpoint is always inserted at the original instruction;
3223 non-standard signals can't be explained by the breakpoint. */
3224 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3225 || (! ecs->event_thread->trap_expected
3226 && breakpoint_inserted_here_p (stop_pc)
3227 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_ILL
3228 || ecs->event_thread->stop_signal == TARGET_SIGNAL_SEGV
3229 || ecs->event_thread->stop_signal == TARGET_SIGNAL_EMT))
3230 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3231 || stop_soon == STOP_QUIETLY_REMOTE)
3232 {
3233 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3234 {
3235 if (debug_infrun)
3236 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3237 stop_print_frame = 0;
3238 stop_stepping (ecs);
3239 return;
3240 }
3241
3242 /* This originates from start_remote(), start_inferior() and
3243 shared library hook functions. */
3244 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3245 {
3246 if (debug_infrun)
3247 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3248 stop_stepping (ecs);
3249 return;
3250 }
3251
3252 /* This originates from attach_command(). We need to overwrite
3253 the stop_signal here, because some kernels don't ignore a
3254 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3255 See more comments in inferior.h. On the other hand, if we
3256 get a non-SIGSTOP, report it to the user - assume the backend
3257 will handle the SIGSTOP if it should show up later.
3258
3259 Also consider that the attach is complete when we see a
3260 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3261 target extended-remote report it instead of a SIGSTOP
3262 (e.g. gdbserver). We already rely on SIGTRAP being our
3263 signal, so this is no exception.
3264
3265 Also consider that the attach is complete when we see a
3266 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3267 the target to stop all threads of the inferior, in case the
3268 low level attach operation doesn't stop them implicitly. If
3269 they weren't stopped implicitly, then the stub will report a
3270 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3271 other than GDB's request. */
3272 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3273 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3274 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3275 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3276 {
3277 stop_stepping (ecs);
3278 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3279 return;
3280 }
3281
3282 /* See if there is a breakpoint at the current PC. */
3283 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
3284
3285 /* The following is in case the break condition called a
3286 function. */
3287 stop_print_frame = 1;
3288
3289 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3290 at one stage in the past included checks for an inferior
3291 function call's call dummy's return breakpoint. The original
3292 comment, that went with the test, read:
3293
3294 ``End of a stack dummy. Some systems (e.g. Sony news) give
3295 another signal besides SIGTRAP, so check here as well as
3296 above.''
3297
3298 If someone ever tries to get call dummies on a
3299 non-executable stack to work (where the target would stop
3300 with something like a SIGSEGV), then those tests might need
3301 to be re-instated. Given, however, that the tests were only
3302 enabled when momentary breakpoints were not being used, I
3303 suspect that it won't be the case.
3304
3305 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3306 be necessary for call dummies on a non-executable stack on
3307 SPARC. */
3308
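/* A SIGTRAP counts as "expected" (not random) if a breakpoint in the
   bpstat explains it, if we were deliberately stepping over a
   breakpoint (trap_expected), or if we were single-stepping through a
   stepping range with no step-resume breakpoint outstanding.  */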
3309 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3310 ecs->random_signal
3311 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3312 || ecs->event_thread->trap_expected
3313 || (ecs->event_thread->step_range_end
3314 && ecs->event_thread->step_resume_breakpoint == NULL));
3315 else
3316 {
3317 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3318 if (!ecs->random_signal)
3319 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3320 }
3321 }
3322
3323 /* When we reach this point, we've pretty much decided
3324 that the reason for stopping must've been a random
3325 (unexpected) signal. */
3326
3327 else
3328 ecs->random_signal = 1;
3329
3330 process_event_stop_test:
3331
3332 /* Re-fetch current thread's frame in case we did a
3333 "goto process_event_stop_test" above. */
3334 frame = get_current_frame ();
3335 gdbarch = get_frame_arch (frame);
3336
3337 /* For the program's own signals, act according to
3338 the signal handling tables. */
3339
3340 if (ecs->random_signal)
3341 {
3342 /* Signal not for debugging purposes. */
3343 int printed = 0;
3344
3345 if (debug_infrun)
3346 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3347 ecs->event_thread->stop_signal);
3348
3349 stopped_by_random_signal = 1;
3350
3351 if (signal_print[ecs->event_thread->stop_signal])
3352 {
3353 printed = 1;
3354 target_terminal_ours_for_output ();
3355 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3356 }
3357 /* Always stop on signals if we're either just gaining control
3358 of the program, or the user explicitly requested this thread
3359 to remain stopped. */
3360 if (stop_soon != NO_STOP_QUIETLY
3361 || ecs->event_thread->stop_requested
3362 || signal_stop_state (ecs->event_thread->stop_signal))
3363 {
3364 stop_stepping (ecs);
3365 return;
3366 }
3367 /* If not going to stop, give terminal back
3368 if we took it away. */
3369 else if (printed)
3370 target_terminal_inferior ();
3371
3372 /* Clear the signal if it should not be passed. */
3373 if (signal_program[ecs->event_thread->stop_signal] == 0)
3374 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3375
3376 if (ecs->event_thread->prev_pc == stop_pc
3377 && ecs->event_thread->trap_expected
3378 && ecs->event_thread->step_resume_breakpoint == NULL)
3379 {
3380 /* We were just starting a new sequence, attempting to
3381 single-step off of a breakpoint and expecting a SIGTRAP.
3382 Instead this signal arrives. This signal will take us out
3383 of the stepping range so GDB needs to remember to, when
3384 the signal handler returns, resume stepping off that
3385 breakpoint. */
3386 /* To simplify things, "continue" is forced to use the same
3387 code paths as single-step - set a breakpoint at the
3388 signal return address and then, once hit, step off that
3389 breakpoint. */
3390 if (debug_infrun)
3391 fprintf_unfiltered (gdb_stdlog,
3392 "infrun: signal arrived while stepping over "
3393 "breakpoint\n");
3394
3395 insert_step_resume_breakpoint_at_frame (frame);
3396 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3397 keep_going (ecs);
3398 return;
3399 }
3400
3401 if (ecs->event_thread->step_range_end != 0
3402 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3403 && (ecs->event_thread->step_range_start <= stop_pc
3404 && stop_pc < ecs->event_thread->step_range_end)
3405 && frame_id_eq (get_stack_frame_id (frame),
3406 ecs->event_thread->step_stack_frame_id)
3407 && ecs->event_thread->step_resume_breakpoint == NULL)
3408 {
3409 /* The inferior is about to take a signal that will take it
3410 out of the single step range. Set a breakpoint at the
3411 current PC (which is presumably where the signal handler
3412 will eventually return) and then allow the inferior to
3413 run free.
3414
3415 Note that this is only needed for a signal delivered
3416 while in the single-step range. Nested signals aren't a
3417 problem as they eventually all return. */
3418 if (debug_infrun)
3419 fprintf_unfiltered (gdb_stdlog,
3420 "infrun: signal may take us out of "
3421 "single-step range\n");
3422
3423 insert_step_resume_breakpoint_at_frame (frame);
3424 keep_going (ecs);
3425 return;
3426 }
3427
3428 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3429 when either there's a nested signal, or when there's a
3430 pending signal enabled just as the signal handler returns
3431 (leaving the inferior at the step-resume-breakpoint without
3432 actually executing it). Either way continue until the
3433 breakpoint is really hit. */
3434 keep_going (ecs);
3435 return;
3436 }
3437
3438 /* Handle cases caused by hitting a breakpoint. */
3439 {
3440 CORE_ADDR jmp_buf_pc;
3441 struct bpstat_what what;
3442
3443 what = bpstat_what (ecs->event_thread->stop_bpstat);
3444
3445 if (what.call_dummy)
3446 {
3447 stop_stack_dummy = 1;
3448 }
3449
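/* bpstat_what reduces the set of breakpoints that triggered at this
   stop to the single most significant action for infrun to take.  */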
3450 switch (what.main_action)
3451 {
3452 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3453 /* If we hit the breakpoint at longjmp while stepping, we
3454 install a momentary breakpoint at the target of the
3455 jmp_buf. */
3456
3457 if (debug_infrun)
3458 fprintf_unfiltered (gdb_stdlog,
3459 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3460
3461 ecs->event_thread->stepping_over_breakpoint = 1;
3462
3463 if (!gdbarch_get_longjmp_target_p (gdbarch)
3464 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3465 {
3466 if (debug_infrun)
3467 fprintf_unfiltered (gdb_stdlog, "\
3468 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3469 keep_going (ecs);
3470 return;
3471 }
3472
3473 /* We're going to replace the current step-resume breakpoint
3474 with a longjmp-resume breakpoint. */
3475 delete_step_resume_breakpoint (ecs->event_thread);
3476
3477 /* Insert a breakpoint at resume address. */
3478 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3479
3480 keep_going (ecs);
3481 return;
3482
3483 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3484 if (debug_infrun)
3485 fprintf_unfiltered (gdb_stdlog,
3486 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3487
3488 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3489 delete_step_resume_breakpoint (ecs->event_thread);
3490
3491 ecs->event_thread->stop_step = 1;
3492 print_stop_reason (END_STEPPING_RANGE, 0);
3493 stop_stepping (ecs);
3494 return;
3495
3496 case BPSTAT_WHAT_SINGLE:
3497 if (debug_infrun)
3498 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3499 ecs->event_thread->stepping_over_breakpoint = 1;
3500 /* Still need to check other stuff, at least the case
3501 where we are stepping and step out of the stepping range. */
3502 break;
3503
3504 case BPSTAT_WHAT_STOP_NOISY:
3505 if (debug_infrun)
3506 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3507 stop_print_frame = 1;
3508
3509 /* We are about to nuke the step_resume_breakpoint via the
3510 cleanup chain, so no need to worry about it here. */
3511
3512 stop_stepping (ecs);
3513 return;
3514
3515 case BPSTAT_WHAT_STOP_SILENT:
3516 if (debug_infrun)
3517 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3518 stop_print_frame = 0;
3519
3520 /* We are about to nuke the step_resume_breakpoint via the
3521 cleanup chain, so no need to worry about it here. */
3522
3523 stop_stepping (ecs);
3524 return;
3525
3526 case BPSTAT_WHAT_STEP_RESUME:
3527 if (debug_infrun)
3528 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3529
3530 delete_step_resume_breakpoint (ecs->event_thread);
3531 if (ecs->event_thread->step_after_step_resume_breakpoint)
3532 {
3533 /* Back when the step-resume breakpoint was inserted, we
3534 were trying to single-step off a breakpoint. Go back
3535 to doing that. */
3536 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3537 ecs->event_thread->stepping_over_breakpoint = 1;
3538 keep_going (ecs);
3539 return;
3540 }
3541 if (stop_pc == ecs->stop_func_start
3542 && execution_direction == EXEC_REVERSE)
3543 {
3544 /* We are stepping over a function call in reverse, and
3545 just hit the step-resume breakpoint at the start
3546 address of the function. Go back to single-stepping,
3547 which should take us back to the function call. */
3548 ecs->event_thread->stepping_over_breakpoint = 1;
3549 keep_going (ecs);
3550 return;
3551 }
3552 break;
3553
3554 case BPSTAT_WHAT_CHECK_SHLIBS:
3555 {
3556 if (debug_infrun)
3557 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3558
3559 /* Check for any newly added shared libraries if we're
3560 supposed to be adding them automatically. Switch
3561 terminal for any messages produced by
3562 breakpoint_re_set. */
3563 target_terminal_ours_for_output ();
3564 /* NOTE: cagney/2003-11-25: Make certain that the target
3565 stack's section table is kept up-to-date. Architectures,
3566 (e.g., PPC64), use the section table to perform
3567 operations such as address => section name and hence
3568 require the table to contain all sections (including
3569 those found in shared libraries). */
3570 #ifdef SOLIB_ADD
3571 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3572 #else
3573 solib_add (NULL, 0, &current_target, auto_solib_add);
3574 #endif
3575 target_terminal_inferior ();
3576
3577 /* If requested, stop when the dynamic linker notifies
3578 gdb of events. This allows the user to get control
3579 and place breakpoints in initializer routines for
3580 dynamically loaded objects (among other things). */
3581 if (stop_on_solib_events || stop_stack_dummy)
3582 {
3583 stop_stepping (ecs);
3584 return;
3585 }
3586 else
3587 {
3588 /* We want to step over this breakpoint, then keep going. */
3589 ecs->event_thread->stepping_over_breakpoint = 1;
3590 break;
3591 }
3592 }
3593 break;
3594
3595 case BPSTAT_WHAT_CHECK_JIT:
3596 if (debug_infrun)
3597 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
3598
3599 /* Switch terminal for any messages produced by breakpoint_re_set. */
3600 target_terminal_ours_for_output ();
3601
3602 jit_event_handler (gdbarch);
3603
3604 target_terminal_inferior ();
3605
3606 /* We want to step over this breakpoint, then keep going. */
3607 ecs->event_thread->stepping_over_breakpoint = 1;
3608
3609 break;
3610
3611 case BPSTAT_WHAT_LAST:
3612 /* Not a real action code, but listed here to shut up gcc -Wall. */
3613
3614 case BPSTAT_WHAT_KEEP_CHECKING:
3615 break;
3616 }
3617 }
3618
3619 /* We come here if we hit a breakpoint but should not
3620 stop for it. Possibly we also were stepping
3621 and should stop for that. So fall through and
3622 test for stepping. But, if not stepping,
3623 do not stop. */
3624
3625 /* In all-stop mode, if we're currently stepping but have stopped in
3626 some other thread, we need to switch back to the stepped thread. */
3627 if (!non_stop)
3628 {
3629 struct thread_info *tp;
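/* Look for some other thread that is still in the middle of a
   step/next; the callback skips the event thread itself.  */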
3630 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3631 ecs->event_thread);
3632 if (tp)
3633 {
3634 /* However, if the current thread is blocked on some internal
3635 breakpoint, and we simply need to step over that breakpoint
3636 to get it going again, do that first. */
3637 if ((ecs->event_thread->trap_expected
3638 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3639 || ecs->event_thread->stepping_over_breakpoint)
3640 {
3641 keep_going (ecs);
3642 return;
3643 }
3644
3645 /* If the stepping thread exited, then don't try to switch
3646 back and resume it, which could fail in several different
3647 ways depending on the target. Instead, just keep going.
3648
3649 We can find a stepping dead thread in the thread list in
3650 two cases:
3651
3652 - The target supports thread exit events, and when the
3653 target tries to delete the thread from the thread list,
3654 inferior_ptid pointed at the exiting thread. In such
3655 case, calling delete_thread does not really remove the
3656 thread from the list; instead, the thread is left listed,
3657 with 'exited' state.
3658
3659 - The target's debug interface does not support thread
3660 exit events, and so we have no idea whatsoever if the
3661 previously stepping thread is still alive. For that
3662 reason, we need to synchronously query the target
3663 now. */
3664 if (is_exited (tp->ptid)
3665 || !target_thread_alive (tp->ptid))
3666 {
3667 if (debug_infrun)
3668 fprintf_unfiltered (gdb_stdlog, "\
3669 infrun: not switching back to stepped thread, it has vanished\n");
3670
3671 delete_thread (tp->ptid);
3672 keep_going (ecs);
3673 return;
3674 }
3675
3676 /* Otherwise, we no longer expect a trap in the current thread.
3677 Clear the trap_expected flag before switching back -- this is
3678 what keep_going would do as well, if we called it. */
3679 ecs->event_thread->trap_expected = 0;
3680
3681 if (debug_infrun)
3682 fprintf_unfiltered (gdb_stdlog,
3683 "infrun: switching back to stepped thread\n");
3684
3685 ecs->event_thread = tp;
3686 ecs->ptid = tp->ptid;
3687 context_switch (ecs->ptid);
3688 keep_going (ecs);
3689 return;
3690 }
3691 }
3692
3693 /* Are we stepping to get the inferior out of the dynamic linker's
3694 hook (and possibly the dld itself) after catching a shlib
3695 event? */
3696 if (ecs->event_thread->stepping_through_solib_after_catch)
3697 {
3698 #if defined(SOLIB_ADD)
3699 /* Have we reached our destination? If not, keep going. */
3700 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
3701 {
3702 if (debug_infrun)
3703 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
3704 ecs->event_thread->stepping_over_breakpoint = 1;
3705 keep_going (ecs);
3706 return;
3707 }
3708 #endif
3709 if (debug_infrun)
3710 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
3711 /* Else, stop and report the catchpoint(s) whose triggering
3712 caused us to begin stepping. */
3713 ecs->event_thread->stepping_through_solib_after_catch = 0;
3714 bpstat_clear (&ecs->event_thread->stop_bpstat);
3715 ecs->event_thread->stop_bpstat
3716 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
3717 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
3718 stop_print_frame = 1;
3719 stop_stepping (ecs);
3720 return;
3721 }
3722
3723 if (ecs->event_thread->step_resume_breakpoint)
3724 {
3725 if (debug_infrun)
3726 fprintf_unfiltered (gdb_stdlog,
3727 "infrun: step-resume breakpoint is inserted\n");
3728
3729 /* Having a step-resume breakpoint overrides anything
3730 else having to do with stepping commands until
3731 that breakpoint is reached. */
3732 keep_going (ecs);
3733 return;
3734 }
3735
3736 if (ecs->event_thread->step_range_end == 0)
3737 {
3738 if (debug_infrun)
3739 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
3740 /* Likewise if we aren't even stepping. */
3741 keep_going (ecs);
3742 return;
3743 }
3744
3745 /* If stepping through a line, keep going if still within it.
3746
3747 Note that step_range_end is the address of the first instruction
3748 beyond the step range, and NOT the address of the last instruction
3749 within it!
3750
3751 Note also that during reverse execution, we may be stepping
3752 through a function epilogue and therefore must detect when
3753 the current-frame changes in the middle of a line. */
3754
3755 if (stop_pc >= ecs->event_thread->step_range_start
3756 && stop_pc < ecs->event_thread->step_range_end
3757 && (execution_direction != EXEC_REVERSE
3758 || frame_id_eq (get_frame_id (frame),
3759 ecs->event_thread->step_frame_id)))
3760 {
3761 if (debug_infrun)
3762 fprintf_unfiltered
3763 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
3764 paddress (gdbarch, ecs->event_thread->step_range_start),
3765 paddress (gdbarch, ecs->event_thread->step_range_end));
3766
3767 /* When stepping backward, stop at beginning of line range
3768 (unless it's the function entry point, in which case
3769 keep going back to the call point). */
3770 if (stop_pc == ecs->event_thread->step_range_start
3771 && stop_pc != ecs->stop_func_start
3772 && execution_direction == EXEC_REVERSE)
3773 {
3774 ecs->event_thread->stop_step = 1;
3775 print_stop_reason (END_STEPPING_RANGE, 0);
3776 stop_stepping (ecs);
3777 }
3778 else
3779 keep_going (ecs);
3780
3781 return;
3782 }
3783
3784 /* We stepped out of the stepping range. */
3785
3786 /* If we are stepping at the source level and entered the runtime
3787 loader dynamic symbol resolution code...
3788
3789 EXEC_FORWARD: we keep on single stepping until we exit the run
3790 time loader code and reach the callee's address.
3791
3792 EXEC_REVERSE: we've already executed the callee (backward), and
3793 the runtime loader code is handled just like any other
3794 undebuggable function call. Now we need only keep stepping
3795 backward through the trampoline code, and that's handled further
3796 down, so there is nothing for us to do here. */
3797
3798 if (execution_direction != EXEC_REVERSE
3799 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3800 && in_solib_dynsym_resolve_code (stop_pc))
3801 {
3802 CORE_ADDR pc_after_resolver =
3803 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
3804
3805 if (debug_infrun)
3806 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
3807
3808 if (pc_after_resolver)
3809 {
3810 /* Set up a step-resume breakpoint at the address
3811 indicated by SKIP_SOLIB_RESOLVER. */
3812 struct symtab_and_line sr_sal;
3813 init_sal (&sr_sal);
3814 sr_sal.pc = pc_after_resolver;
3815
3816 insert_step_resume_breakpoint_at_sal (gdbarch,
3817 sr_sal, null_frame_id);
3818 }
3819
3820 keep_going (ecs);
3821 return;
3822 }
3823
3824 if (ecs->event_thread->step_range_end != 1
3825 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3826 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
3827 && get_frame_type (frame) == SIGTRAMP_FRAME)
3828 {
3829 if (debug_infrun)
3830 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
3831 /* The inferior, while doing a "step" or "next", has ended up in
3832 a signal trampoline (either by a signal being delivered or by
3833 the signal handler returning). Just single-step until the
3834 inferior leaves the trampoline (either by calling the handler
3835 or returning). */
3836 keep_going (ecs);
3837 return;
3838 }
3839
3840 /* Check for subroutine calls. The check for the current frame
3841 equalling the step ID is not necessary - the check of the
3842 previous frame's ID is sufficient - but it is a common case and
3843 cheaper than checking the previous frame's ID.
3844
3845 NOTE: frame_id_eq will never report two invalid frame IDs as
3846 being equal, so to get into this block, both the current and
3847 previous frame must have valid frame IDs. */
3848 /* The outer_frame_id check is a heuristic to detect stepping
3849 through startup code. If we step over an instruction which
3850 sets the stack pointer from an invalid value to a valid value,
3851 we may detect that as a subroutine call from the mythical
3852 "outermost" function. This could be fixed by marking
3853 outermost frames as !stack_p,code_p,special_p. Then the
3854 initial outermost frame, before sp was valid, would
3855 have code_addr == &_start. See the comment in frame_id_eq
3856 for more. */
3857 if (!frame_id_eq (get_stack_frame_id (frame),
3858 ecs->event_thread->step_stack_frame_id)
3859 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
3860 ecs->event_thread->step_stack_frame_id)
3861 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
3862 outer_frame_id)
3863 || step_start_function != find_pc_function (stop_pc))))
3864 {
3865 CORE_ADDR real_stop_pc;
3866
3867 if (debug_infrun)
3868 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
3869
3870 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
3871 || ((ecs->event_thread->step_range_end == 1)
3872 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
3873 ecs->stop_func_start)))
3874 {
3875 /* I presume that step_over_calls is only 0 when we're
3876 supposed to be stepping at the assembly language level
3877 ("stepi"). Just stop. */
3878 /* Also, maybe we just did a "nexti" inside a prologue, so we
3879 thought it was a subroutine call but it was not. Stop as
3880 well. FENN */
3881 /* And this works the same backward as frontward. MVS */
3882 ecs->event_thread->stop_step = 1;
3883 print_stop_reason (END_STEPPING_RANGE, 0);
3884 stop_stepping (ecs);
3885 return;
3886 }
3887
3888 /* Reverse stepping through solib trampolines. */
3889
3890 if (execution_direction == EXEC_REVERSE
3891 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
3892 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
3893 || (ecs->stop_func_start == 0
3894 && in_solib_dynsym_resolve_code (stop_pc))))
3895 {
3896 /* Any solib trampoline code can be handled in reverse
3897 by simply continuing to single-step. We have already
3898 executed the solib function (backwards), and a few
3899 steps will take us back through the trampoline to the
3900 caller. */
3901 keep_going (ecs);
3902 return;
3903 }
3904
3905 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
3906 {
3907 /* We're doing a "next".
3908
3909 Normal (forward) execution: set a breakpoint at the
3910 callee's return address (the address at which the caller
3911 will resume).
3912
3913 Reverse (backward) execution: set the step-resume
3914 breakpoint at the start of the function that we just
3915 stepped into (backwards), and continue to there. When we
3916 get there, we'll need to single-step back to the caller. */
3917
3918 if (execution_direction == EXEC_REVERSE)
3919 {
3920 struct symtab_and_line sr_sal;
3921
3922 /* Normal function call return (static or dynamic). */
3923 init_sal (&sr_sal);
3924 sr_sal.pc = ecs->stop_func_start;
3925 insert_step_resume_breakpoint_at_sal (gdbarch,
3926 sr_sal, null_frame_id);
3927 }
3928 else
3929 insert_step_resume_breakpoint_at_caller (frame);
3930
3931 keep_going (ecs);
3932 return;
3933 }
3934
3935 /* If we are in a function call trampoline (a stub between the
3936 calling routine and the real function), locate the real
3937 function. That's what tells us (a) whether we want to step
3938 into it at all, and (b) what prologue we want to run to the
3939 end of, if we do step into it. */
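/* Ask the language-specific trampoline handler first; if it does not
   recognize the PC it returns zero, and we fall back to the
   architecture's generic trampoline skipper.  */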
3940 real_stop_pc = skip_language_trampoline (frame, stop_pc);
3941 if (real_stop_pc == 0)
3942 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
3943 if (real_stop_pc != 0)
3944 ecs->stop_func_start = real_stop_pc;
3945
3946 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
3947 {
3948 struct symtab_and_line sr_sal;
3949 init_sal (&sr_sal);
3950 sr_sal.pc = ecs->stop_func_start;
3951
3952 insert_step_resume_breakpoint_at_sal (gdbarch,
3953 sr_sal, null_frame_id);
3954 keep_going (ecs);
3955 return;
3956 }
3957
3958 /* If we have line number information for the function we are
3959 thinking of stepping into, step into it.
3960
3961 If there are several symtabs at that PC (e.g. with include
3962 files), we just want to know whether *any* of them have line
3963 numbers. find_pc_line handles this. */
3964 {
3965 struct symtab_and_line tmp_sal;
3966
3967 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
3968 if (tmp_sal.line != 0)
3969 {
3970 if (execution_direction == EXEC_REVERSE)
3971 handle_step_into_function_backward (gdbarch, ecs);
3972 else
3973 handle_step_into_function (gdbarch, ecs);
3974 return;
3975 }
3976 }
3977
3978 /* If we have no line number and the step-stop-if-no-debug is
3979 set, we stop the step so that the user has a chance to switch
3980 to assembly mode. */
3981 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3982 && step_stop_if_no_debug)
3983 {
3984 ecs->event_thread->stop_step = 1;
3985 print_stop_reason (END_STEPPING_RANGE, 0);
3986 stop_stepping (ecs);
3987 return;
3988 }
3989
3990 if (execution_direction == EXEC_REVERSE)
3991 {
3992 /* Set a breakpoint at callee's start address.
3993 From there we can step once and be back in the caller. */
3994 struct symtab_and_line sr_sal;
3995 init_sal (&sr_sal);
3996 sr_sal.pc = ecs->stop_func_start;
3997 insert_step_resume_breakpoint_at_sal (gdbarch,
3998 sr_sal, null_frame_id);
3999 }
4000 else
4001 /* Set a breakpoint at callee's return address (the address
4002 at which the caller will resume). */
4003 insert_step_resume_breakpoint_at_caller (frame);
4004
4005 keep_going (ecs);
4006 return;
4007 }
4008
4009 /* Reverse stepping through solib trampolines. */
4010
4011 if (execution_direction == EXEC_REVERSE
4012 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4013 {
4014 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4015 || (ecs->stop_func_start == 0
4016 && in_solib_dynsym_resolve_code (stop_pc)))
4017 {
4018 /* Any solib trampoline code can be handled in reverse
4019 by simply continuing to single-step. We have already
4020 executed the solib function (backwards), and a few
4021 steps will take us back through the trampoline to the
4022 caller. */
4023 keep_going (ecs);
4024 return;
4025 }
4026 else if (in_solib_dynsym_resolve_code (stop_pc))
4027 {
4028 /* Stepped backward into the solib dynsym resolver.
4029 Set a breakpoint at its start and continue, then
4030 one more step will take us out. */
4031 struct symtab_and_line sr_sal;
4032 init_sal (&sr_sal);
4033 sr_sal.pc = ecs->stop_func_start;
4034 insert_step_resume_breakpoint_at_sal (gdbarch,
4035 sr_sal, null_frame_id);
4036 keep_going (ecs);
4037 return;
4038 }
4039 }
4040
4041 /* If we're in the return path from a shared library trampoline,
4042 we want to proceed through the trampoline when stepping. */
4043 if (gdbarch_in_solib_return_trampoline (gdbarch,
4044 stop_pc, ecs->stop_func_name))
4045 {
4046 /* Determine where this trampoline returns. */
4047 CORE_ADDR real_stop_pc;
4048 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4049
4050 if (debug_infrun)
4051 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4052
4053 /* Only proceed through if we know where it's going. */
4054 if (real_stop_pc)
4055 {
4056 /* And put the step-breakpoint there and go until there. */
4057 struct symtab_and_line sr_sal;
4058
4059 init_sal (&sr_sal); /* initialize to zeroes */
4060 sr_sal.pc = real_stop_pc;
4061 sr_sal.section = find_pc_overlay (sr_sal.pc);
4062
4063 /* Do not specify what the fp should be when we stop since
4064 on some machines the prologue is where the new fp value
4065 is established. */
4066 insert_step_resume_breakpoint_at_sal (gdbarch,
4067 sr_sal, null_frame_id);
4068
4069 /* Restart without fiddling with the step ranges or
4070 other state. */
4071 keep_going (ecs);
4072 return;
4073 }
4074 }
4075
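/* Look up the source line for the stop address; the end-of-stepping-range
   decisions below are driven by this sal.  */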
4076 stop_pc_sal = find_pc_line (stop_pc, 0);
4077
4078 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4079 the trampoline processing logic; however, there are some trampolines
4080 that have no names, so we should do trampoline handling first. */
4081 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4082 && ecs->stop_func_name == NULL
4083 && stop_pc_sal.line == 0)
4084 {
4085 if (debug_infrun)
4086 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4087
4088 /* The inferior just stepped into, or returned to, an
4089 undebuggable function (where there is no debugging information
4090 and no line number corresponding to the address where the
4091 inferior stopped). Since we want to skip this kind of code,
4092 we keep going until the inferior returns from this
4093 function - unless the user has asked us not to (via
4094 set step-mode) or we no longer know how to get back
4095 to the call site. */
4096 if (step_stop_if_no_debug
4097 || !frame_id_p (frame_unwind_caller_id (frame)))
4098 {
4099 /* If we have no line number and the step-stop-if-no-debug
4100 is set, we stop the step so that the user has a chance to
4101 switch to assembly mode. */
4102 ecs->event_thread->stop_step = 1;
4103 print_stop_reason (END_STEPPING_RANGE, 0);
4104 stop_stepping (ecs);
4105 return;
4106 }
4107 else
4108 {
4109 /* Set a breakpoint at callee's return address (the address
4110 at which the caller will resume). */
4111 insert_step_resume_breakpoint_at_caller (frame);
4112 keep_going (ecs);
4113 return;
4114 }
4115 }
4116
4117 if (ecs->event_thread->step_range_end == 1)
4118 {
4119 /* It is stepi or nexti. We always want to stop stepping after
4120 one instruction. */
4121 if (debug_infrun)
4122 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4123 ecs->event_thread->stop_step = 1;
4124 print_stop_reason (END_STEPPING_RANGE, 0);
4125 stop_stepping (ecs);
4126 return;
4127 }
4128
4129 if (stop_pc_sal.line == 0)
4130 {
4131 /* We have no line number information. That means we should stop
4132 stepping (does this always happen right after one instruction,
4133 when we do "s" in a function with no line numbers,
4134 or can this happen as a result of a return or longjmp?). */
4135 if (debug_infrun)
4136 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4137 ecs->event_thread->stop_step = 1;
4138 print_stop_reason (END_STEPPING_RANGE, 0);
4139 stop_stepping (ecs);
4140 return;
4141 }
4142
4143 /* Look for "calls" to inlined functions, part one. If the inline
4144 frame machinery detected some skipped call sites, we have entered
4145 a new inline function. */
4146
4147 if (frame_id_eq (get_frame_id (get_current_frame ()),
4148 ecs->event_thread->step_frame_id)
4149 && inline_skipped_frames (ecs->ptid))
4150 {
4151 struct symtab_and_line call_sal;
4152
4153 if (debug_infrun)
4154 fprintf_unfiltered (gdb_stdlog,
4155 "infrun: stepped into inlined function\n");
4156
4157 find_frame_sal (get_current_frame (), &call_sal);
4158
4159 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4160 {
4161 /* For "step", we're going to stop. But if the call site
4162 for this inlined function is on the same source line as
4163 we were previously stepping, go down into the function
4164 first. Otherwise stop at the call site. */
4165
4166 if (call_sal.line == ecs->event_thread->current_line
4167 && call_sal.symtab == ecs->event_thread->current_symtab)
4168 step_into_inline_frame (ecs->ptid);
4169
4170 ecs->event_thread->stop_step = 1;
4171 print_stop_reason (END_STEPPING_RANGE, 0);
4172 stop_stepping (ecs);
4173 return;
4174 }
4175 else
4176 {
4177 /* For "next", we should stop at the call site if it is on a
4178 different source line. Otherwise continue through the
4179 inlined function. */
4180 if (call_sal.line == ecs->event_thread->current_line
4181 && call_sal.symtab == ecs->event_thread->current_symtab)
4182 keep_going (ecs);
4183 else
4184 {
4185 ecs->event_thread->stop_step = 1;
4186 print_stop_reason (END_STEPPING_RANGE, 0);
4187 stop_stepping (ecs);
4188 }
4189 return;
4190 }
4191 }
4192
4193 /* Look for "calls" to inlined functions, part two. If we are still
4194 in the same real function we were stepping through, but we have
4195 to go further up to find the exact frame ID, we are stepping
4196 through a more inlined call beyond its call site. */
4197
4198 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4199 && !frame_id_eq (get_frame_id (get_current_frame ()),
4200 ecs->event_thread->step_frame_id)
4201 && stepped_in_from (get_current_frame (),
4202 ecs->event_thread->step_frame_id))
4203 {
4204 if (debug_infrun)
4205 fprintf_unfiltered (gdb_stdlog,
4206 "infrun: stepping through inlined function\n");
4207
4208 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4209 keep_going (ecs);
4210 else
4211 {
4212 ecs->event_thread->stop_step = 1;
4213 print_stop_reason (END_STEPPING_RANGE, 0);
4214 stop_stepping (ecs);
4215 }
4216 return;
4217 }
4218
4219 if ((stop_pc == stop_pc_sal.pc)
4220 && (ecs->event_thread->current_line != stop_pc_sal.line
4221 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4222 {
4223 /* We are at the start of a different line. So stop. Note that
4224 we don't stop if we step into the middle of a different line.
4225 That is said to make things like for (;;) statements work
4226 better. */
4227 if (debug_infrun)
4228 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4229 ecs->event_thread->stop_step = 1;
4230 print_stop_reason (END_STEPPING_RANGE, 0);
4231 stop_stepping (ecs);
4232 return;
4233 }
4234
4235 /* We aren't done stepping.
4236
4237 Optimize by setting the stepping range to the line.
4238 (We might not be in the original line, but if we entered a
4239 new line in mid-statement, we continue stepping. This makes
4240 things like for(;;) statements work better.) */
4241
4242 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4243 ecs->event_thread->step_range_end = stop_pc_sal.end;
4244 set_step_info (frame, stop_pc_sal);
4245
4246 if (debug_infrun)
4247 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4248 keep_going (ecs);
4249 }
4250
4251 /* Is thread TP in the middle of single-stepping? */
4252
4253 static int
4254 currently_stepping (struct thread_info *tp)
4255 {
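/* A thread is considered to be stepping if it is inside a stepping
   range with no step-resume breakpoint pending, is stepping over a
   breakpoint (trap_expected), is stepping out after a shared-library
   catchpoint, or must be single-stepped on bpstat's behalf
   (bpstat_should_step, e.g. for software watchpoints that require
   single-stepping).  */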
4256 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4257 || tp->trap_expected
4258 || tp->stepping_through_solib_after_catch
4259 || bpstat_should_step ());
4260 }
4261
4262 /* Returns true if any thread *but* the one passed in "data" is in the
4263 middle of stepping or of handling a "next". */
4264
4265 static int
4266 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4267 {
4268 if (tp == data)
4269 return 0;
4270
4271 return (tp->step_range_end
4272 || tp->trap_expected
4273 || tp->stepping_through_solib_after_catch);
4274 }
4275
4276 /* Inferior has stepped into a subroutine call with source code that
4277 we should not step over. Do step to the first line of code in
4278 it. */
4279
4280 static void
4281 handle_step_into_function (struct gdbarch *gdbarch,
4282 struct execution_control_state *ecs)
4283 {
4284 struct symtab *s;
4285 struct symtab_and_line stop_func_sal, sr_sal;
4286
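/* Unless the callee is written in assembly, advance the nominal stop
   address past the function prologue so we stop on the first line of
   user code.  */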
4287 s = find_pc_symtab (stop_pc);
4288 if (s && s->language != language_asm)
4289 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4290 ecs->stop_func_start);
4291
4292 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4293 /* Use the step_resume_break to step until the end of the prologue,
4294 even if that involves jumps (as it seems to on the vax under
4295 4.2). */
4296 /* If the prologue ends in the middle of a source line, continue to
4297 the end of that source line (if it is still within the function).
4298 Otherwise, just go to the end of the prologue. */
4299 if (stop_func_sal.end
4300 && stop_func_sal.pc != ecs->stop_func_start
4301 && stop_func_sal.end < ecs->stop_func_end)
4302 ecs->stop_func_start = stop_func_sal.end;
4303
4304 /* Architectures which require breakpoint adjustment might not be able
4305 to place a breakpoint at the computed address. If so, the test
4306 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4307 ecs->stop_func_start to an address at which a breakpoint may be
4308 legitimately placed.
4309
4310 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4311 made, GDB will enter an infinite loop when stepping through
4312 optimized code consisting of VLIW instructions which contain
4313 subinstructions corresponding to different source lines. On
4314 FR-V, it's not permitted to place a breakpoint on any but the
4315 first subinstruction of a VLIW instruction. When a breakpoint is
4316 set, GDB will adjust the breakpoint address to the beginning of
4317 the VLIW instruction. Thus, we need to make the corresponding
4318 adjustment here when computing the stop address. */
4319
4320 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4321 {
4322 ecs->stop_func_start
4323 = gdbarch_adjust_breakpoint_address (gdbarch,
4324 ecs->stop_func_start);
4325 }
4326
4327 if (ecs->stop_func_start == stop_pc)
4328 {
4329 /* We are already there: stop now. */
4330 ecs->event_thread->stop_step = 1;
4331 print_stop_reason (END_STEPPING_RANGE, 0);
4332 stop_stepping (ecs);
4333 return;
4334 }
4335 else
4336 {
4337 /* Put the step-breakpoint there and go until there. */
4338 init_sal (&sr_sal); /* initialize to zeroes */
4339 sr_sal.pc = ecs->stop_func_start;
4340 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4341
4342 /* Do not specify what the fp should be when we stop since on
4343 some machines the prologue is where the new fp value is
4344 established. */
4345 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4346
4347 /* And make sure stepping stops right away then. */
4348 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4349 }
4350 keep_going (ecs);
4351 }
4352
4353 /* Inferior has stepped backward into a subroutine call with source
4354 code that we should not step over. Do step to the beginning of the
4355 last line of code in it. */
4356
4357 static void
4358 handle_step_into_function_backward (struct gdbarch *gdbarch,
4359 struct execution_control_state *ecs)
4360 {
4361 struct symtab *s;
4362 struct symtab_and_line stop_func_sal, sr_sal;
4363
4364 s = find_pc_symtab (stop_pc);
4365 if (s && s->language != language_asm)
4366 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4367 ecs->stop_func_start);
4368
4369 stop_func_sal = find_pc_line (stop_pc, 0);
4370
4371 /* OK, we're just going to keep stepping here. */
4372 if (stop_func_sal.pc == stop_pc)
4373 {
4374 /* We're there already. Just stop stepping now. */
4375 ecs->event_thread->stop_step = 1;
4376 print_stop_reason (END_STEPPING_RANGE, 0);
4377 stop_stepping (ecs);
4378 }
4379 else
4380 {
4381 /* Else just reset the step range and keep going.
4382 No step-resume breakpoint; they don't work for
4383 epilogues, which can have multiple entry paths. */
4384 ecs->event_thread->step_range_start = stop_func_sal.pc;
4385 ecs->event_thread->step_range_end = stop_func_sal.end;
4386 keep_going (ecs);
4387 }
4388 return;
4389 }
4390
4391 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4392 This is used both to step over functions and to skip over code. */
4393
4394 static void
4395 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4396 struct symtab_and_line sr_sal,
4397 struct frame_id sr_id)
4398 {
4399 /* There should never be more than one step-resume or longjmp-resume
4400 breakpoint per thread, so we should never be setting a new
4401 step_resume_breakpoint when one is already active. */
4402 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4403
4404 if (debug_infrun)
4405 fprintf_unfiltered (gdb_stdlog,
4406 "infrun: inserting step-resume breakpoint at %s\n",
4407 paddress (gdbarch, sr_sal.pc));
4408
4409 inferior_thread ()->step_resume_breakpoint
4410 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4411 }
4412
4413 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4414 to skip a potential signal handler.
4415
4416 This is called with the interrupted function's frame. The signal
4417 handler, when it returns, will resume the interrupted function at
4418 RETURN_FRAME.pc. */
4419
4420 static void
4421 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4422 {
4423 struct symtab_and_line sr_sal;
4424 struct gdbarch *gdbarch;
4425
4426 gdb_assert (return_frame != NULL);
4427 init_sal (&sr_sal); /* initialize to zeros */
4428
4429 gdbarch = get_frame_arch (return_frame);
4430 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4431 sr_sal.section = find_pc_overlay (sr_sal.pc);
4432
4433 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4434 get_stack_frame_id (return_frame));
4435 }
4436
4437 /* Similar to insert_step_resume_breakpoint_at_frame, except
4438 that it sets a breakpoint at the previous frame's PC. This is used to
4439 skip a function after stepping into it (for "next" or if the called
4440 function has no debugging information).
4441
4442 The current function has almost always been reached by single
4443 stepping a call or return instruction. NEXT_FRAME belongs to the
4444 current function, and the breakpoint will be set at the caller's
4445 resume address.
4446
4447 This is a separate function rather than reusing
4448 insert_step_resume_breakpoint_at_frame in order to avoid
4449 get_prev_frame, which may stop prematurely (see the implementation
4450 of frame_unwind_caller_id for an example). */
4451
4452 static void
4453 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4454 {
4455 struct symtab_and_line sr_sal;
4456 struct gdbarch *gdbarch;
4457
4458 /* We shouldn't have gotten here if we don't know where the call site
4459 is. */
4460 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4461
4462 init_sal (&sr_sal); /* initialize to zeros */
4463
4464 gdbarch = frame_unwind_caller_arch (next_frame);
4465 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4466 frame_unwind_caller_pc (next_frame));
4467 sr_sal.section = find_pc_overlay (sr_sal.pc);
4468
4469 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4470 frame_unwind_caller_id (next_frame));
4471 }
4472
4473 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4474 new breakpoint at the target of a jmp_buf. The handling of
4475 longjmp-resume uses the same mechanisms used for handling
4476 "step-resume" breakpoints. */
4477
4478 static void
4479 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4480 {
4481 /* There should never be more than one step-resume or longjmp-resume
4482 breakpoint per thread, so we should never be setting a new
4483 longjmp_resume_breakpoint when one is already active. */
4484 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4485
4486 if (debug_infrun)
4487 fprintf_unfiltered (gdb_stdlog,
4488 "infrun: inserting longjmp-resume breakpoint at %s\n",
4489 paddress (gdbarch, pc));
4490
4491 inferior_thread ()->step_resume_breakpoint =
4492 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4493 }
4494
4495 static void
4496 stop_stepping (struct execution_control_state *ecs)
4497 {
4498 if (debug_infrun)
4499 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4500
4501 /* Let callers know we don't want to wait for the inferior anymore. */
4502 ecs->wait_some_more = 0;
4503 }
4504
4505 /* This function handles various cases where we need to continue
4506 waiting for the inferior. */
4507 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4508
4509 static void
4510 keep_going (struct execution_control_state *ecs)
4511 {
4512 /* Save the pc before execution, to compare with pc after stop. */
4513 ecs->event_thread->prev_pc
4514 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4515
4516 /* If we did not do break;, it means we should keep running the
4517 inferior and not return to the debugger. */
4518
4519 if (ecs->event_thread->trap_expected
4520 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4521 {
4522 /* We took a signal (which we are supposed to pass through to
4523 the inferior, else we'd not get here) and we haven't yet
4524 gotten our trap. Simply continue. */
4525 resume (currently_stepping (ecs->event_thread),
4526 ecs->event_thread->stop_signal);
4527 }
4528 else
4529 {
4530 /* Either the trap was not expected, but we are continuing
4531 anyway (the user asked that this signal be passed to the
4532 child)
4533 -- or --
4534 The signal was SIGTRAP, i.e. it was our signal, but we
4535 decided we should resume from it.
4536
4537 We're going to run this baby now!
4538
4539 Note that insert_breakpoints won't try to re-insert
4540 already inserted breakpoints. Therefore, we don't
4541 care if breakpoints were already inserted, or not. */
4542
4543 if (ecs->event_thread->stepping_over_breakpoint)
4544 {
4545 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4546 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4547 /* Since we can't do a displaced step, we have to remove
4548 the breakpoint while we step it. To keep things
4549 simple, we remove them all. */
4550 remove_breakpoints ();
4551 }
4552 else
4553 {
4554 struct gdb_exception e;
4555 /* Stop stepping when inserting breakpoints
4556 has failed. */
4557 TRY_CATCH (e, RETURN_MASK_ERROR)
4558 {
4559 insert_breakpoints ();
4560 }
4561 if (e.reason < 0)
4562 {
4563 stop_stepping (ecs);
4564 return;
4565 }
4566 }
4567
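/* If we are about to single-step over a breakpoint, remember that the
   resulting SIGTRAP is ours and must not be reported to the user as a
   stop.  */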
4568 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4569
4570 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4571 specifies that such a signal should be delivered to the
4572 target program).
4573
4574 Typically, this would occur when a user is debugging a
4575 target monitor on a simulator: the target monitor sets a
4576 breakpoint; the simulator encounters this break-point and
4577 halts the simulation, handing control to GDB; GDB, noting
4578 that the break-point isn't valid, returns control back to the
4579 simulator; the simulator then delivers the hardware
4580 equivalent of a SIGNAL_TRAP to the program being debugged. */
4581
4582 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4583 && !signal_program[ecs->event_thread->stop_signal])
4584 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4585
4586 resume (currently_stepping (ecs->event_thread),
4587 ecs->event_thread->stop_signal);
4588 }
4589
4590 prepare_to_wait (ecs);
4591 }
4592
4593 /* This function normally comes after a resume, before
4594 handle_inferior_event exits. It takes care of any last bits of
4595 housekeeping, and sets the all-important wait_some_more flag. */
4596
4597 static void
4598 prepare_to_wait (struct execution_control_state *ecs)
4599 {
4600 if (debug_infrun)
4601 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4602
4603 /* This is the old end of the while loop. Let everybody know we
4604 want to wait for the inferior some more and get called again
4605 soon. */
4606 ecs->wait_some_more = 1;
4607 }
4608
4609 /* Print why the inferior has stopped. We always print something when
4610 the inferior exits, or receives a signal. The rest of the cases are
4611 dealt with later on in normal_stop() and print_it_typical(). Ideally
4612 there should be a call to this function from handle_inferior_event()
4613 each time stop_stepping() is called. */
4614 static void
4615 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4616 {
4617 switch (stop_reason)
4618 {
4619 case END_STEPPING_RANGE:
4620 /* We are done with a step/next/si/ni command. */
4621 /* For now print nothing. */
4622 /* Print a message only if not in the middle of doing a "step n"
4623 operation for n > 1 */
4624 if (!inferior_thread ()->step_multi
4625 || !inferior_thread ()->stop_step)
4626 if (ui_out_is_mi_like_p (uiout))
4627 ui_out_field_string
4628 (uiout, "reason",
4629 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4630 break;
4631 case SIGNAL_EXITED:
4632 /* The inferior was terminated by a signal. */
4633 annotate_signalled ();
4634 if (ui_out_is_mi_like_p (uiout))
4635 ui_out_field_string
4636 (uiout, "reason",
4637 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4638 ui_out_text (uiout, "\nProgram terminated with signal ");
4639 annotate_signal_name ();
4640 ui_out_field_string (uiout, "signal-name",
4641 target_signal_to_name (stop_info));
4642 annotate_signal_name_end ();
4643 ui_out_text (uiout, ", ");
4644 annotate_signal_string ();
4645 ui_out_field_string (uiout, "signal-meaning",
4646 target_signal_to_string (stop_info));
4647 annotate_signal_string_end ();
4648 ui_out_text (uiout, ".\n");
4649 ui_out_text (uiout, "The program no longer exists.\n");
4650 break;
4651 case EXITED:
4652 /* The inferior program is finished. */
4653 annotate_exited (stop_info);
4654 if (stop_info)
4655 {
4656 if (ui_out_is_mi_like_p (uiout))
4657 ui_out_field_string (uiout, "reason",
4658 async_reason_lookup (EXEC_ASYNC_EXITED));
4659 ui_out_text (uiout, "\nProgram exited with code ");
4660 ui_out_field_fmt (uiout, "exit-code", "0%o",
4661 (unsigned int) stop_info);
4662 ui_out_text (uiout, ".\n");
4663 }
4664 else
4665 {
4666 if (ui_out_is_mi_like_p (uiout))
4667 ui_out_field_string
4668 (uiout, "reason",
4669 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
4670 ui_out_text (uiout, "\nProgram exited normally.\n");
4671 }
4672 /* Support the --return-child-result option. */
4673 return_child_result_value = stop_info;
4674 break;
4675 case SIGNAL_RECEIVED:
4676 /* Signal received. The signal table tells us to print about
4677 it. */
4678 annotate_signal ();
4679
4680 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
4681 {
4682 struct thread_info *t = inferior_thread ();
4683
4684 ui_out_text (uiout, "\n[");
4685 ui_out_field_string (uiout, "thread-name",
4686 target_pid_to_str (t->ptid));
4687 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
4688 ui_out_text (uiout, " stopped");
4689 }
4690 else
4691 {
4692 ui_out_text (uiout, "\nProgram received signal ");
4693 annotate_signal_name ();
4694 if (ui_out_is_mi_like_p (uiout))
4695 ui_out_field_string
4696 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
4697 ui_out_field_string (uiout, "signal-name",
4698 target_signal_to_name (stop_info));
4699 annotate_signal_name_end ();
4700 ui_out_text (uiout, ", ");
4701 annotate_signal_string ();
4702 ui_out_field_string (uiout, "signal-meaning",
4703 target_signal_to_string (stop_info));
4704 annotate_signal_string_end ();
4705 }
4706 ui_out_text (uiout, ".\n");
4707 break;
4708 case NO_HISTORY:
4709 /* Reverse execution: target ran out of history info. */
4710 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
4711 break;
4712 default:
4713 internal_error (__FILE__, __LINE__,
4714 _("print_stop_reason: unrecognized enum value"));
4715 break;
4716 }
4717 }
4718 \f
4719
4720 /* Here to return control to GDB when the inferior stops for real.
4721 Print appropriate messages, remove breakpoints, give terminal our modes.
4722
4723 STOP_PRINT_FRAME nonzero means print the executing frame
4724 (pc, function, args, file, line number and line text).
4725 BREAKPOINTS_FAILED nonzero means stop was due to error
4726 attempting to insert breakpoints. */
4727
4728 void
4729 normal_stop (void)
4730 {
4731 struct target_waitstatus last;
4732 ptid_t last_ptid;
4733 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
4734
4735 get_last_target_status (&last_ptid, &last);
4736
4737 /* If an exception is thrown from this point on, make sure to
4738 propagate GDB's knowledge of the executing state to the
4739 frontend/user running state. A QUIT is an easy exception to see
4740 here, so do this before any filtered output. */
4741 if (!non_stop)
4742 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
4743 else if (last.kind != TARGET_WAITKIND_SIGNALLED
4744 && last.kind != TARGET_WAITKIND_EXITED)
4745 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
4746
4747 /* In non-stop mode, we don't want GDB to switch threads behind the
4748 user's back, to avoid races where the user is typing a command to
4749 apply to thread x, but GDB switches to thread y before the user
4750 finishes entering the command. */
4751
4752 /* As with the notification of thread events, we want to delay
4753 notifying the user that we've switched thread context until
4754 the inferior actually stops.
4755
4756 There's no point in saying anything if the inferior has exited.
4757 Note that SIGNALLED here means "exited with a signal", not
4758 "received a signal". */
4759 if (!non_stop
4760 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
4761 && target_has_execution
4762 && last.kind != TARGET_WAITKIND_SIGNALLED
4763 && last.kind != TARGET_WAITKIND_EXITED)
4764 {
4765 target_terminal_ours_for_output ();
4766 printf_filtered (_("[Switching to %s]\n"),
4767 target_pid_to_str (inferior_ptid));
4768 annotate_thread_changed ();
4769 previous_inferior_ptid = inferior_ptid;
4770 }
4771
4772 if (!breakpoints_always_inserted_mode () && target_has_execution)
4773 {
4774 if (remove_breakpoints ())
4775 {
4776 target_terminal_ours_for_output ();
4777 printf_filtered (_("\
4778 Cannot remove breakpoints because program is no longer writable.\n\
4779 Further execution is probably impossible.\n"));
4780 }
4781 }
4782
4783 /* If an auto-display called a function and that got a signal,
4784 delete that auto-display to avoid an infinite recursion. */
4785
4786 if (stopped_by_random_signal)
4787 disable_current_display ();
4788
4789 /* Don't print a message if in the middle of doing a "step n"
4790 operation for n > 1 */
4791 if (target_has_execution
4792 && last.kind != TARGET_WAITKIND_SIGNALLED
4793 && last.kind != TARGET_WAITKIND_EXITED
4794 && inferior_thread ()->step_multi
4795 && inferior_thread ()->stop_step)
4796 goto done;
4797
4798 target_terminal_ours ();
4799
4800 /* Set the current source location. This will also happen if we
4801 display the frame below, but the current SAL will be incorrect
4802 during a user hook-stop function. */
4803 if (has_stack_frames () && !stop_stack_dummy)
4804 set_current_sal_from_frame (get_current_frame (), 1);
4805
4806 /* Let the user/frontend see the threads as stopped. */
4807 do_cleanups (old_chain);
4808
4809 /* Look up the hook_stop and run it (CLI internally handles problem
4810 of stop_command's pre-hook not existing). */
4811 if (stop_command)
4812 catch_errors (hook_stop_stub, stop_command,
4813 "Error while running hook_stop:\n", RETURN_MASK_ALL);
4814
4815 if (!has_stack_frames ())
4816 goto done;
4817
4818 if (last.kind == TARGET_WAITKIND_SIGNALLED
4819 || last.kind == TARGET_WAITKIND_EXITED)
4820 goto done;
4821
4822 /* Select innermost stack frame - i.e., current frame is frame 0,
4823 and current location is based on that.
4824 Don't do this on return from a stack dummy routine,
4825 or if the program has exited. */
4826
4827 if (!stop_stack_dummy)
4828 {
4829 select_frame (get_current_frame ());
4830
4831 /* Print current location without a level number, if
4832 we have changed functions or hit a breakpoint.
4833 Print source line if we have one.
4834 bpstat_print() contains the logic deciding in detail
4835 what to print, based on the event(s) that just occurred. */
4836
4837 /* If --batch-silent is enabled then there's no need to print the current
4838 source location, and trying to do so risks an error message about
4839 missing source files. */
4840 if (stop_print_frame && !batch_silent)
4841 {
4842 int bpstat_ret;
4843 int source_flag;
4844 int do_frame_printing = 1;
4845 struct thread_info *tp = inferior_thread ();
4846
4847 bpstat_ret = bpstat_print (tp->stop_bpstat);
4848 switch (bpstat_ret)
4849 {
4850 case PRINT_UNKNOWN:
4851 /* If we had hit a shared library event breakpoint,
4852 bpstat_print would print out this message. If we hit
4853 an OS-level shared library event, do the same
4854 thing. */
4855 if (last.kind == TARGET_WAITKIND_LOADED)
4856 {
4857 printf_filtered (_("Stopped due to shared library event\n"));
4858 source_flag = SRC_LINE; /* Unused; frame printing is suppressed below. */
4859 do_frame_printing = 0;
4860 break;
4861 }
4862
4863 /* FIXME: cagney/2002-12-01: A frame ID does (or should) carry
4864 around the function and does (or should) use that when doing a
4865 frame comparison, making the step_start_function check below redundant. */
4866 if (tp->stop_step
4867 && frame_id_eq (tp->step_frame_id,
4868 get_frame_id (get_current_frame ()))
4869 && step_start_function == find_pc_function (stop_pc))
4870 source_flag = SRC_LINE; /* finished step, just print source line */
4871 else
4872 source_flag = SRC_AND_LOC; /* print location and source line */
4873 break;
4874 case PRINT_SRC_AND_LOC:
4875 source_flag = SRC_AND_LOC; /* print location and source line */
4876 break;
4877 case PRINT_SRC_ONLY:
4878 source_flag = SRC_LINE;
4879 break;
4880 case PRINT_NOTHING:
4881 source_flag = SRC_LINE; /* Unused; frame printing is suppressed below. */
4882 do_frame_printing = 0;
4883 break;
4884 default:
4885 internal_error (__FILE__, __LINE__, _("Unknown value."));
4886 }
4887
4888 /* The behavior of this routine with respect to the source
4889 flag is:
4890 SRC_LINE: Print only source line
4891 LOCATION: Print only location
4892 SRC_AND_LOC: Print location and source line */
4893 if (do_frame_printing)
4894 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
4895
4896 /* Display the auto-display expressions. */
4897 do_displays ();
4898 }
4899 }
4900
4901 /* Save the function value return registers, if we care.
4902 We might be about to restore their previous contents. */
4903 if (inferior_thread ()->proceed_to_finish)
4904 {
4905 /* This should not be necessary. */
4906 if (stop_registers)
4907 regcache_xfree (stop_registers);
4908
4909 /* NB: The copy goes through to the target picking up the value of
4910 all the registers. */
4911 stop_registers = regcache_dup (get_current_regcache ());
4912 }
4913
4914 if (stop_stack_dummy)
4915 {
4916 /* Pop the empty frame that contains the stack dummy.
4917 This also restores inferior state prior to the call
4918 (struct inferior_thread_state). */
4919 struct frame_info *frame = get_current_frame ();
4920 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
4921 frame_pop (frame);
4922 /* frame_pop() calls reinit_frame_cache as the last thing it does
4923 which means there's currently no selected frame. We don't need
4924 to re-establish a selected frame if the dummy call returns normally,
4925 that will be done by restore_inferior_status. However, we do have
4926 to handle the case where the dummy call is returning after being
4927 stopped (e.g. the dummy call previously hit a breakpoint). We
4928 can't know which case we have so just always re-establish a
4929 selected frame here. */
4930 select_frame (get_current_frame ());
4931 }
4932
4933 done:
4934 annotate_stopped ();
4935
4936 /* Suppress the stop observer if we're in the middle of:
4937
4938 - a step n (n > 1), as there are still more steps to be done.
4939
4940 - a "finish" command, as the observer will be called in
4941 finish_command_continuation, so it can include the inferior
4942 function's return value.
4943
4944 - calling an inferior function, as we pretend the inferior didn't
4945 run at all. The return value of the call is handled by the
4946 expression evaluator, through call_function_by_hand. */
4947
4948 if (!target_has_execution
4949 || last.kind == TARGET_WAITKIND_SIGNALLED
4950 || last.kind == TARGET_WAITKIND_EXITED
4951 || (!inferior_thread ()->step_multi
4952 && !(inferior_thread ()->stop_bpstat
4953 && inferior_thread ()->proceed_to_finish)
4954 && !inferior_thread ()->in_infcall))
4955 {
4956 if (!ptid_equal (inferior_ptid, null_ptid))
4957 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
4958 stop_print_frame);
4959 else
4960 observer_notify_normal_stop (NULL, stop_print_frame);
4961 }
4962
4963 if (target_has_execution)
4964 {
4965 if (last.kind != TARGET_WAITKIND_SIGNALLED
4966 && last.kind != TARGET_WAITKIND_EXITED)
4967 /* Delete the breakpoint we stopped at, if it wants to be deleted.
4968 Delete any breakpoint that is to be deleted at the next stop. */
4969 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
4970 }
4971 }
4972
4973 static int
4974 hook_stop_stub (void *cmd)
4975 {
4976 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
4977 return (0);
4978 }
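/* Editor's note (illustrative, not part of the original source): the
   pre-hook executed above is the user-defined "hook-stop".  A user can
   arrange for commands to run at every stop with, for example:

     define hook-stop
       info registers
     end

   entered at the GDB prompt; hook_stop_stub merely forwards to that
   hook under catch_errors.  */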
4979 \f
4980 int
4981 signal_stop_state (int signo)
4982 {
4983 return signal_stop[signo];
4984 }
4985
4986 int
4987 signal_print_state (int signo)
4988 {
4989 return signal_print[signo];
4990 }
4991
4992 int
4993 signal_pass_state (int signo)
4994 {
4995 return signal_program[signo];
4996 }
4997
4998 int
4999 signal_stop_update (int signo, int state)
5000 {
5001 int ret = signal_stop[signo];
5002 signal_stop[signo] = state;
5003 return ret;
5004 }
5005
5006 int
5007 signal_print_update (int signo, int state)
5008 {
5009 int ret = signal_print[signo];
5010 signal_print[signo] = state;
5011 return ret;
5012 }
5013
5014 int
5015 signal_pass_update (int signo, int state)
5016 {
5017 int ret = signal_program[signo];
5018 signal_program[signo] = state;
5019 return ret;
5020 }
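/* Editor's note: a minimal illustrative sketch (not part of the
   original source) of how a caller might use the update functions
   above to temporarily keep GDB from stopping on SIGALRM.  The helper
   name run_without_sigalrm_stop and its FUNC argument are hypothetical.

     static void
     run_without_sigalrm_stop (void (*func) (void))
     {
       int old_stop = signal_stop_update (TARGET_SIGNAL_ALRM, 0);
       int old_print = signal_print_update (TARGET_SIGNAL_ALRM, 0);

       func ();

       signal_print_update (TARGET_SIGNAL_ALRM, old_print);
       signal_stop_update (TARGET_SIGNAL_ALRM, old_stop);
     }

   The update functions return the previous setting precisely so that
   callers can restore it afterwards.  */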
5021
5022 static void
5023 sig_print_header (void)
5024 {
5025 printf_filtered (_("\
5026 Signal Stop\tPrint\tPass to program\tDescription\n"));
5027 }
5028
5029 static void
5030 sig_print_info (enum target_signal oursig)
5031 {
5032 const char *name = target_signal_to_name (oursig);
5033 int name_padding = 13 - (int) strlen (name);
5034
5035 if (name_padding <= 0)
5036 name_padding = 0;
5037
5038 printf_filtered ("%s", name);
5039 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5040 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5041 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5042 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5043 printf_filtered ("%s\n", target_signal_to_string (oursig));
5044 }
5045
5046 /* Specify how various signals in the inferior should be handled. */
5047
5048 static void
5049 handle_command (char *args, int from_tty)
5050 {
5051 char **argv;
5052 int digits, wordlen;
5053 int sigfirst, signum, siglast;
5054 enum target_signal oursig;
5055 int allsigs;
5056 int nsigs;
5057 unsigned char *sigs;
5058 struct cleanup *old_chain;
5059
5060 if (args == NULL)
5061 {
5062 error_no_arg (_("signal to handle"));
5063 }
5064
5065 /* Allocate and zero an array of flags for which signals to handle. */
5066
5067 nsigs = (int) TARGET_SIGNAL_LAST;
5068 sigs = (unsigned char *) alloca (nsigs);
5069 memset (sigs, 0, nsigs);
5070
5071 /* Break the command line up into args. */
5072
5073 argv = gdb_buildargv (args);
5074 old_chain = make_cleanup_freeargv (argv);
5075
5076 /* Walk through the args, looking for signal oursigs, signal names, and
5077 actions. Signal numbers and signal names may be interspersed with
5078 actions, with the actions being performed for all signals cumulatively
5079 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5080
5081 while (*argv != NULL)
5082 {
5083 wordlen = strlen (*argv);
5084 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5085 {;
5086 }
5087 allsigs = 0;
5088 sigfirst = siglast = -1;
5089
5090 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5091 {
5092 /* Apply action to all signals except those used by the
5093 debugger. Silently skip those. */
5094 allsigs = 1;
5095 sigfirst = 0;
5096 siglast = nsigs - 1;
5097 }
5098 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5099 {
5100 SET_SIGS (nsigs, sigs, signal_stop);
5101 SET_SIGS (nsigs, sigs, signal_print);
5102 }
5103 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5104 {
5105 UNSET_SIGS (nsigs, sigs, signal_program);
5106 }
5107 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5108 {
5109 SET_SIGS (nsigs, sigs, signal_print);
5110 }
5111 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5112 {
5113 SET_SIGS (nsigs, sigs, signal_program);
5114 }
5115 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5116 {
5117 UNSET_SIGS (nsigs, sigs, signal_stop);
5118 }
5119 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5120 {
5121 SET_SIGS (nsigs, sigs, signal_program);
5122 }
5123 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5124 {
5125 UNSET_SIGS (nsigs, sigs, signal_print);
5126 UNSET_SIGS (nsigs, sigs, signal_stop);
5127 }
5128 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5129 {
5130 UNSET_SIGS (nsigs, sigs, signal_program);
5131 }
5132 else if (digits > 0)
5133 {
5134 /* It is numeric. The numeric signal refers to our own
5135 internal signal numbering from target.h, not to host/target
5136 signal number. This is a feature; users really should be
5137 using symbolic names anyway, and the common ones like
5138 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5139
5140 sigfirst = siglast = (int)
5141 target_signal_from_command (atoi (*argv));
5142 if ((*argv)[digits] == '-')
5143 {
5144 siglast = (int)
5145 target_signal_from_command (atoi ((*argv) + digits + 1));
5146 }
5147 if (sigfirst > siglast)
5148 {
5149 /* The bounds were given in reverse order (HIGH-LOW); swap them. */
5150 signum = sigfirst;
5151 sigfirst = siglast;
5152 siglast = signum;
5153 }
5154 }
5155 else
5156 {
5157 oursig = target_signal_from_name (*argv);
5158 if (oursig != TARGET_SIGNAL_UNKNOWN)
5159 {
5160 sigfirst = siglast = (int) oursig;
5161 }
5162 else
5163 {
5164 /* Not a number and not a recognized flag word => complain. */
5165 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5166 }
5167 }
5168
5169 /* If any signal numbers or symbol names were found, set flags for
5170 which signals to apply actions to. */
5171
5172 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5173 {
5174 switch ((enum target_signal) signum)
5175 {
5176 case TARGET_SIGNAL_TRAP:
5177 case TARGET_SIGNAL_INT:
5178 if (!allsigs && !sigs[signum])
5179 {
5180 if (query (_("%s is used by the debugger.\n\
5181 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5182 {
5183 sigs[signum] = 1;
5184 }
5185 else
5186 {
5187 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5188 gdb_flush (gdb_stdout);
5189 }
5190 }
5191 break;
5192 case TARGET_SIGNAL_0:
5193 case TARGET_SIGNAL_DEFAULT:
5194 case TARGET_SIGNAL_UNKNOWN:
5195 /* Make sure that "all" doesn't print these. */
5196 break;
5197 default:
5198 sigs[signum] = 1;
5199 break;
5200 }
5201 }
5202
5203 argv++;
5204 }
5205
5206 for (signum = 0; signum < nsigs; signum++)
5207 if (sigs[signum])
5208 {
5209 target_notice_signals (inferior_ptid);
5210
5211 if (from_tty)
5212 {
5213 /* Show the results. */
5214 sig_print_header ();
5215 for (; signum < nsigs; signum++)
5216 if (sigs[signum])
5217 sig_print_info (signum);
5218 }
5219
5220 break;
5221 }
5222
5223 do_cleanups (old_chain);
5224 }
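/* Editor's note (illustrative, not part of the original source):
   example invocations of the command implemented above, matching the
   syntax described in its online help:

     handle SIGUSR1 nostop noprint pass
     handle 5-10 stop print
     handle all pass

   Actions accumulate across the argument list, and numeric ranges use
   GDB's internal signal numbering, as noted in the numeric branch
   above.  */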
5225
5226 static void
5227 xdb_handle_command (char *args, int from_tty)
5228 {
5229 char **argv;
5230 struct cleanup *old_chain;
5231
5232 if (args == NULL)
5233 error_no_arg (_("xdb command"));
5234
5235 /* Break the command line up into args. */
5236
5237 argv = gdb_buildargv (args);
5238 old_chain = make_cleanup_freeargv (argv);
5239 if (argv[1] != (char *) NULL)
5240 {
5241 char *argBuf;
5242 int bufLen;
5243
5244 bufLen = strlen (argv[0]) + 20;
5245 argBuf = (char *) xmalloc (bufLen);
5246 if (argBuf)
5247 {
5248 int validFlag = 1;
5249 enum target_signal oursig;
5250
5251 oursig = target_signal_from_name (argv[0]);
5252 memset (argBuf, 0, bufLen);
5253 if (strcmp (argv[1], "Q") == 0)
5254 sprintf (argBuf, "%s %s", argv[0], "noprint");
5255 else
5256 {
5257 if (strcmp (argv[1], "s") == 0)
5258 {
5259 if (!signal_stop[oursig])
5260 sprintf (argBuf, "%s %s", argv[0], "stop");
5261 else
5262 sprintf (argBuf, "%s %s", argv[0], "nostop");
5263 }
5264 else if (strcmp (argv[1], "i") == 0)
5265 {
5266 if (!signal_program[oursig])
5267 sprintf (argBuf, "%s %s", argv[0], "pass");
5268 else
5269 sprintf (argBuf, "%s %s", argv[0], "nopass");
5270 }
5271 else if (strcmp (argv[1], "r") == 0)
5272 {
5273 if (!signal_print[oursig])
5274 sprintf (argBuf, "%s %s", argv[0], "print");
5275 else
5276 sprintf (argBuf, "%s %s", argv[0], "noprint");
5277 }
5278 else
5279 validFlag = 0;
5280 }
5281 if (validFlag)
5282 handle_command (argBuf, from_tty);
5283 else
5284 printf_filtered (_("Invalid signal handling flag.\n"));
5285 if (argBuf)
5286 xfree (argBuf);
5287 }
5288 }
5289 do_cleanups (old_chain);
5290 }
5291
5292 /* Print current contents of the tables set by the handle command.
5293 It is possible we should just be printing signals actually used
5294 by the current target (but for things to work right when switching
5295 targets, all signals should be in the signal tables). */
5296
5297 static void
5298 signals_info (char *signum_exp, int from_tty)
5299 {
5300 enum target_signal oursig;
5301 sig_print_header ();
5302
5303 if (signum_exp)
5304 {
5305 /* First see if this is a symbol name. */
5306 oursig = target_signal_from_name (signum_exp);
5307 if (oursig == TARGET_SIGNAL_UNKNOWN)
5308 {
5309 /* No, try numeric. */
5310 oursig =
5311 target_signal_from_command (parse_and_eval_long (signum_exp));
5312 }
5313 sig_print_info (oursig);
5314 return;
5315 }
5316
5317 printf_filtered ("\n");
5318 /* These ugly casts brought to you by the native VAX compiler. */
5319 for (oursig = TARGET_SIGNAL_FIRST;
5320 (int) oursig < (int) TARGET_SIGNAL_LAST;
5321 oursig = (enum target_signal) ((int) oursig + 1))
5322 {
5323 QUIT;
5324
5325 if (oursig != TARGET_SIGNAL_UNKNOWN
5326 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5327 sig_print_info (oursig);
5328 }
5329
5330 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5331 }
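/* Editor's note (illustrative, not part of the original source):
   "info signals" with no argument prints the full table via
   sig_print_header and sig_print_info above, while "info signals
   SIGSEGV" (or a numeric argument) prints a single row.  */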
5332
5333 /* The $_siginfo convenience variable is a bit special. We don't know
5334 for sure the type of the value until we actually have a chance to
5335 fetch the data. The type can change depending on gdbarch, so it is
5336 also dependent on which thread you have selected. We handle this by:
5337
5338 1. making $_siginfo be an internalvar that creates a new value on
5339 access.
5340
5341 2. making the value of $_siginfo be an lval_computed value. */
5342
5343 /* This function implements the lval_computed support for reading a
5344 $_siginfo value. */
5345
5346 static void
5347 siginfo_value_read (struct value *v)
5348 {
5349 LONGEST transferred;
5350
5351 transferred =
5352 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5353 NULL,
5354 value_contents_all_raw (v),
5355 value_offset (v),
5356 TYPE_LENGTH (value_type (v)));
5357
5358 if (transferred != TYPE_LENGTH (value_type (v)))
5359 error (_("Unable to read siginfo"));
5360 }
5361
5362 /* This function implements the lval_computed support for writing a
5363 $_siginfo value. */
5364
5365 static void
5366 siginfo_value_write (struct value *v, struct value *fromval)
5367 {
5368 LONGEST transferred;
5369
5370 transferred = target_write (&current_target,
5371 TARGET_OBJECT_SIGNAL_INFO,
5372 NULL,
5373 value_contents_all_raw (fromval),
5374 value_offset (v),
5375 TYPE_LENGTH (value_type (fromval)));
5376
5377 if (transferred != TYPE_LENGTH (value_type (fromval)))
5378 error (_("Unable to write siginfo"));
5379 }
5380
5381 static struct lval_funcs siginfo_value_funcs =
5382 {
5383 siginfo_value_read,
5384 siginfo_value_write
5385 };
5386
5387 /* Return a new value with the correct type for the siginfo object of
5388 the current thread using architecture GDBARCH. Return a void value
5389 if there's no object available. */
5390
5391 static struct value *
5392 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5393 {
5394 if (target_has_stack
5395 && !ptid_equal (inferior_ptid, null_ptid)
5396 && gdbarch_get_siginfo_type_p (gdbarch))
5397 {
5398 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5399 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5400 }
5401
5402 return allocate_value (builtin_type (gdbarch)->builtin_void);
5403 }
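/* Editor's note (illustrative, not part of the original source): on
   targets that provide TARGET_OBJECT_SIGNAL_INFO and a gdbarch siginfo
   type, the computed value built above lets the user inspect the
   pending signal's data, e.g. "print $_siginfo.si_signo" at the GDB
   prompt; assignments to $_siginfo fields go back to the target
   through siginfo_value_write.  */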
5404
5405 \f
5406 /* Inferior thread state.
5407 These are details related to the inferior itself, and don't include
5408 things like what frame the user had selected or what gdb was doing
5409 with the target at the time.
5410 For inferior function calls these are things we want to restore
5411 regardless of whether the function call successfully completes
5412 or the dummy frame has to be manually popped. */
5413
5414 struct inferior_thread_state
5415 {
5416 enum target_signal stop_signal;
5417 CORE_ADDR stop_pc;
5418 struct regcache *registers;
5419 };
5420
5421 struct inferior_thread_state *
5422 save_inferior_thread_state (void)
5423 {
5424 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5425 struct thread_info *tp = inferior_thread ();
5426
5427 inf_state->stop_signal = tp->stop_signal;
5428 inf_state->stop_pc = stop_pc;
5429
5430 inf_state->registers = regcache_dup (get_current_regcache ());
5431
5432 return inf_state;
5433 }
5434
5435 /* Restore inferior session state to INF_STATE. */
5436
5437 void
5438 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5439 {
5440 struct thread_info *tp = inferior_thread ();
5441
5442 tp->stop_signal = inf_state->stop_signal;
5443 stop_pc = inf_state->stop_pc;
5444
5445 /* The inferior can be gone if the user types "print exit(0)"
5446 (and perhaps other times). */
5447 if (target_has_execution)
5448 /* NB: The register write goes through to the target. */
5449 regcache_cpy (get_current_regcache (), inf_state->registers);
5450 regcache_xfree (inf_state->registers);
5451 xfree (inf_state);
5452 }
5453
5454 static void
5455 do_restore_inferior_thread_state_cleanup (void *state)
5456 {
5457 restore_inferior_thread_state (state);
5458 }
5459
5460 struct cleanup *
5461 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5462 {
5463 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5464 }
5465
5466 void
5467 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5468 {
5469 regcache_xfree (inf_state->registers);
5470 xfree (inf_state);
5471 }
5472
5473 struct regcache *
5474 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5475 {
5476 return inf_state->registers;
5477 }
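/* Editor's note: a minimal usage sketch for the functions above (not
   part of the original source), mirroring the save/restore pattern an
   inferior-function-call site would use.  run_inferior_call_somehow and
   keep_new_state are hypothetical placeholders.

     struct inferior_thread_state *saved = save_inferior_thread_state ();
     struct cleanup *chain
       = make_cleanup_restore_inferior_thread_state (saved);

     run_inferior_call_somehow ();

     if (keep_new_state)
       {
         discard_cleanups (chain);
         discard_inferior_thread_state (saved);
       }
     else
       do_cleanups (chain);

   do_cleanups restores the registers, stop_pc and stop_signal and
   frees SAVED; the discard path keeps the new state and only frees the
   saved copy.  */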
5478
5479 /* Session related state for inferior function calls.
5480 These are the additional bits of state that need to be restored
5481 when an inferior function call successfully completes. */
5482
5483 struct inferior_status
5484 {
5485 bpstat stop_bpstat;
5486 int stop_step;
5487 int stop_stack_dummy;
5488 int stopped_by_random_signal;
5489 int stepping_over_breakpoint;
5490 CORE_ADDR step_range_start;
5491 CORE_ADDR step_range_end;
5492 struct frame_id step_frame_id;
5493 struct frame_id step_stack_frame_id;
5494 enum step_over_calls_kind step_over_calls;
5495 CORE_ADDR step_resume_break_address;
5496 int stop_after_trap;
5497 int stop_soon;
5498
5499 /* ID of the selected frame when the inferior function call was made. */
5500 struct frame_id selected_frame_id;
5501
5502 int proceed_to_finish;
5503 int in_infcall;
5504 };
5505
5506 /* Save all of the information associated with the inferior<==>gdb
5507 connection. */
5508
5509 struct inferior_status *
5510 save_inferior_status (void)
5511 {
5512 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5513 struct thread_info *tp = inferior_thread ();
5514 struct inferior *inf = current_inferior ();
5515
5516 inf_status->stop_step = tp->stop_step;
5517 inf_status->stop_stack_dummy = stop_stack_dummy;
5518 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5519 inf_status->stepping_over_breakpoint = tp->trap_expected;
5520 inf_status->step_range_start = tp->step_range_start;
5521 inf_status->step_range_end = tp->step_range_end;
5522 inf_status->step_frame_id = tp->step_frame_id;
5523 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5524 inf_status->step_over_calls = tp->step_over_calls;
5525 inf_status->stop_after_trap = stop_after_trap;
5526 inf_status->stop_soon = inf->stop_soon;
5527 /* Save original bpstat chain here; replace it with copy of chain.
5528 If caller's caller is walking the chain, they'll be happier if we
5529 hand them back the original chain when restore_inferior_status is
5530 called. */
5531 inf_status->stop_bpstat = tp->stop_bpstat;
5532 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5533 inf_status->proceed_to_finish = tp->proceed_to_finish;
5534 inf_status->in_infcall = tp->in_infcall;
5535
5536 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5537
5538 return inf_status;
5539 }
5540
5541 static int
5542 restore_selected_frame (void *args)
5543 {
5544 struct frame_id *fid = (struct frame_id *) args;
5545 struct frame_info *frame;
5546
5547 frame = frame_find_by_id (*fid);
5548
5549 /* If frame_find_by_id () fails, no frame with the previously
5550 selected frame's ID exists in the current stack. */
5551 if (frame == NULL)
5552 {
5553 warning (_("Unable to restore previously selected frame."));
5554 return 0;
5555 }
5556
5557 select_frame (frame);
5558
5559 return (1);
5560 }
5561
5562 /* Restore inferior session state to INF_STATUS. */
5563
5564 void
5565 restore_inferior_status (struct inferior_status *inf_status)
5566 {
5567 struct thread_info *tp = inferior_thread ();
5568 struct inferior *inf = current_inferior ();
5569
5570 tp->stop_step = inf_status->stop_step;
5571 stop_stack_dummy = inf_status->stop_stack_dummy;
5572 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5573 tp->trap_expected = inf_status->stepping_over_breakpoint;
5574 tp->step_range_start = inf_status->step_range_start;
5575 tp->step_range_end = inf_status->step_range_end;
5576 tp->step_frame_id = inf_status->step_frame_id;
5577 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5578 tp->step_over_calls = inf_status->step_over_calls;
5579 stop_after_trap = inf_status->stop_after_trap;
5580 inf->stop_soon = inf_status->stop_soon;
5581 bpstat_clear (&tp->stop_bpstat);
5582 tp->stop_bpstat = inf_status->stop_bpstat;
5583 inf_status->stop_bpstat = NULL;
5584 tp->proceed_to_finish = inf_status->proceed_to_finish;
5585 tp->in_infcall = inf_status->in_infcall;
5586
5587 if (target_has_stack)
5588 {
5589 /* The point of catch_errors is that if the stack is clobbered,
5590 walking the stack might encounter a garbage pointer and
5591 error() trying to dereference it. */
5592 if (catch_errors
5593 (restore_selected_frame, &inf_status->selected_frame_id,
5594 "Unable to restore previously selected frame:\n",
5595 RETURN_MASK_ERROR) == 0)
5596 /* Error in restoring the selected frame. Select the innermost
5597 frame. */
5598 select_frame (get_current_frame ());
5599 }
5600
5601 xfree (inf_status);
5602 }
5603
5604 static void
5605 do_restore_inferior_status_cleanup (void *sts)
5606 {
5607 restore_inferior_status (sts);
5608 }
5609
5610 struct cleanup *
5611 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5612 {
5613 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5614 }
5615
5616 void
5617 discard_inferior_status (struct inferior_status *inf_status)
5618 {
5619 /* See save_inferior_status for info on stop_bpstat. */
5620 bpstat_clear (&inf_status->stop_bpstat);
5621 xfree (inf_status);
5622 }
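/* Editor's note: an illustrative sketch (not part of the original
   source) of the save/restore contract above, as an inferior-call site
   might use it; run_the_call is a hypothetical placeholder.

     struct inferior_status *inf_status = save_inferior_status ();
     struct cleanup *chain
       = make_cleanup_restore_inferior_status (inf_status);

     run_the_call ();

     discard_cleanups (chain);
     restore_inferior_status (inf_status);

   If an error is thrown while the call runs, the cleanup fires instead
   and restores the saved state; discard_inferior_status is for callers
   that decide to keep the new state.  */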
5623 \f
5624 int
5625 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5626 {
5627 struct target_waitstatus last;
5628 ptid_t last_ptid;
5629
5630 get_last_target_status (&last_ptid, &last);
5631
5632 if (last.kind != TARGET_WAITKIND_FORKED)
5633 return 0;
5634
5635 if (!ptid_equal (last_ptid, pid))
5636 return 0;
5637
5638 *child_pid = last.value.related_pid;
5639 return 1;
5640 }
5641
5642 int
5643 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
5644 {
5645 struct target_waitstatus last;
5646 ptid_t last_ptid;
5647
5648 get_last_target_status (&last_ptid, &last);
5649
5650 if (last.kind != TARGET_WAITKIND_VFORKED)
5651 return 0;
5652
5653 if (!ptid_equal (last_ptid, pid))
5654 return 0;
5655
5656 *child_pid = last.value.related_pid;
5657 return 1;
5658 }
5659
5660 int
5661 inferior_has_execd (ptid_t pid, char **execd_pathname)
5662 {
5663 struct target_waitstatus last;
5664 ptid_t last_ptid;
5665
5666 get_last_target_status (&last_ptid, &last);
5667
5668 if (last.kind != TARGET_WAITKIND_EXECD)
5669 return 0;
5670
5671 if (!ptid_equal (last_ptid, pid))
5672 return 0;
5673
5674 *execd_pathname = xstrdup (last.value.execd_pathname);
5675 return 1;
5676 }
5677
5678 int
5679 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
5680 {
5681 struct target_waitstatus last;
5682 ptid_t last_ptid;
5683
5684 get_last_target_status (&last_ptid, &last);
5685
5686 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
5687 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
5688 return 0;
5689
5690 if (!ptid_equal (last_ptid, pid))
5691 return 0;
5692
5693 *syscall_number = last.value.syscall_number;
5694 return 1;
5695 }
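/* Editor's note: a small illustrative sketch (not part of the original
   source) of how the predicates above are meant to be consulted after
   a stop:

     ptid_t child;

     if (inferior_has_forked (inferior_ptid, &child))
       printf_filtered (_("forked child is %s\n"),
                        target_pid_to_str (child));
   */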
5696
5697 /* Oft used ptids */
5698 ptid_t null_ptid;
5699 ptid_t minus_one_ptid;
5700
5701 /* Create a ptid given the necessary PID, LWP, and TID components. */
5702
5703 ptid_t
5704 ptid_build (int pid, long lwp, long tid)
5705 {
5706 ptid_t ptid;
5707
5708 ptid.pid = pid;
5709 ptid.lwp = lwp;
5710 ptid.tid = tid;
5711 return ptid;
5712 }
5713
5714 /* Create a ptid from just a pid. */
5715
5716 ptid_t
5717 pid_to_ptid (int pid)
5718 {
5719 return ptid_build (pid, 0, 0);
5720 }
5721
5722 /* Fetch the pid (process id) component from a ptid. */
5723
5724 int
5725 ptid_get_pid (ptid_t ptid)
5726 {
5727 return ptid.pid;
5728 }
5729
5730 /* Fetch the lwp (lightweight process) component from a ptid. */
5731
5732 long
5733 ptid_get_lwp (ptid_t ptid)
5734 {
5735 return ptid.lwp;
5736 }
5737
5738 /* Fetch the tid (thread id) component from a ptid. */
5739
5740 long
5741 ptid_get_tid (ptid_t ptid)
5742 {
5743 return ptid.tid;
5744 }
5745
5746 /* ptid_equal() is used to test equality of two ptids. */
5747
5748 int
5749 ptid_equal (ptid_t ptid1, ptid_t ptid2)
5750 {
5751 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
5752 && ptid1.tid == ptid2.tid);
5753 }
5754
5755 /* Returns true if PTID represents a process. */
5756
5757 int
5758 ptid_is_pid (ptid_t ptid)
5759 {
5760 if (ptid_equal (minus_one_ptid, ptid))
5761 return 0;
5762 if (ptid_equal (null_ptid, ptid))
5763 return 0;
5764
5765 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
5766 }
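/* Editor's note: a minimal illustrative sketch (not part of the
   original source) exercising the ptid accessors above:

     ptid_t p = ptid_build (1234, 5678, 0);

     gdb_assert (ptid_get_pid (p) == 1234);
     gdb_assert (ptid_get_lwp (p) == 5678);
     gdb_assert (!ptid_is_pid (p));
     gdb_assert (ptid_is_pid (pid_to_ptid (1234)));
     gdb_assert (!ptid_equal (p, pid_to_ptid (1234)));
   */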
5767
5768 /* restore_inferior_ptid() will be used by the cleanup machinery
5769 to restore the inferior_ptid value saved in a call to
5770 save_inferior_ptid(). */
5771
5772 static void
5773 restore_inferior_ptid (void *arg)
5774 {
5775 ptid_t *saved_ptid_ptr = arg;
5776 inferior_ptid = *saved_ptid_ptr;
5777 xfree (arg);
5778 }
5779
5780 /* Save the value of inferior_ptid so that it may be restored by a
5781 later call to do_cleanups(). Returns the struct cleanup pointer
5782 needed for later doing the cleanup. */
5783
5784 struct cleanup *
5785 save_inferior_ptid (void)
5786 {
5787 ptid_t *saved_ptid_ptr;
5788
5789 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
5790 *saved_ptid_ptr = inferior_ptid;
5791 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
5792 }
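/* Editor's note: the usual pattern for the pair above, shown as an
   illustrative sketch (not part of the original source); temp_pid is a
   hypothetical value.

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = pid_to_ptid (temp_pid);
     target_notice_signals (inferior_ptid);
     do_cleanups (old_chain);

   Running the cleanup restores the original inferior_ptid and frees
   the saved copy, including during error unwinding.  */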
5793 \f
5794
5795 /* User interface for reverse debugging:
5796 Set exec-direction / show exec-direction commands
5797 (reports an error unless the target supports reverse execution). */
5798
5799 enum exec_direction_kind execution_direction = EXEC_FORWARD;
5800 static const char exec_forward[] = "forward";
5801 static const char exec_reverse[] = "reverse";
5802 static const char *exec_direction = exec_forward;
5803 static const char *exec_direction_names[] = {
5804 exec_forward,
5805 exec_reverse,
5806 NULL
5807 };
5808
5809 static void
5810 set_exec_direction_func (char *args, int from_tty,
5811 struct cmd_list_element *cmd)
5812 {
if (target_can_execute_reverse)
{
if (!strcmp (exec_direction, exec_forward))
execution_direction = EXEC_FORWARD;
else if (!strcmp (exec_direction, exec_reverse))
execution_direction = EXEC_REVERSE;
}
else
{
/* The target cannot run in reverse; reset the setting and report it. */
exec_direction = exec_forward;
error (_("Target `%s' does not support reverse execution."),
target_shortname);
}
}
5821
5822 static void
5823 show_exec_direction_func (struct ui_file *out, int from_tty,
5824 struct cmd_list_element *cmd, const char *value)
5825 {
5826 switch (execution_direction) {
5827 case EXEC_FORWARD:
5828 fprintf_filtered (out, _("Forward.\n"));
5829 break;
5830 case EXEC_REVERSE:
5831 fprintf_filtered (out, _("Reverse.\n"));
5832 break;
5833 case EXEC_ERROR:
5834 default:
5835 fprintf_filtered (out,
5836 _("Forward (target `%s' does not support exec-direction).\n"),
5837 target_shortname);
5838 break;
5839 }
5840 }
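/* Editor's note (illustrative, not part of the original source): with
   a target that can execute in reverse (for example one driven by
   process record), "set exec-direction reverse" makes subsequent
   stepping and continuation run backwards, and "set exec-direction
   forward" restores the default; "show exec-direction" reports the
   current state via the function above.  */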
5841
5842 /* User interface for non-stop mode. */
5843
5844 int non_stop = 0;
5845 static int non_stop_1 = 0;
5846
5847 static void
5848 set_non_stop (char *args, int from_tty,
5849 struct cmd_list_element *c)
5850 {
5851 if (target_has_execution)
5852 {
5853 non_stop_1 = non_stop;
5854 error (_("Cannot change this setting while the inferior is running."));
5855 }
5856
5857 non_stop = non_stop_1;
5858 }
5859
5860 static void
5861 show_non_stop (struct ui_file *file, int from_tty,
5862 struct cmd_list_element *c, const char *value)
5863 {
5864 fprintf_filtered (file,
5865 _("Controlling the inferior in non-stop mode is %s.\n"),
5866 value);
5867 }
5868
5869 static void
5870 show_schedule_multiple (struct ui_file *file, int from_tty,
5871 struct cmd_list_element *c, const char *value)
5872 {
5873 fprintf_filtered (file, _("\
5874 Resuming the execution of threads of all processes is %s.\n"), value);
5875 }
5876
5877 void
5878 _initialize_infrun (void)
5879 {
5880 int i;
5881 int numsigs;
5882 struct cmd_list_element *c;
5883
5884 add_info ("signals", signals_info, _("\
5885 What debugger does when program gets various signals.\n\
5886 Specify a signal as argument to print info on that signal only."));
5887 add_info_alias ("handle", "signals", 0);
5888
5889 add_com ("handle", class_run, handle_command, _("\
5890 Specify how to handle a signal.\n\
5891 Args are signals and actions to apply to those signals.\n\
5892 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
5893 from 1-15 are allowed for compatibility with old versions of GDB.\n\
5894 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
5895 The special arg \"all\" is recognized to mean all signals except those\n\
5896 used by the debugger, typically SIGTRAP and SIGINT.\n\
5897 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
5898 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
5899 Stop means reenter debugger if this signal happens (implies print).\n\
5900 Print means print a message if this signal happens.\n\
5901 Pass means let program see this signal; otherwise program doesn't know.\n\
5902 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
5903 Pass and Stop may be combined."));
5904 if (xdb_commands)
5905 {
5906 add_com ("lz", class_info, signals_info, _("\
5907 What debugger does when program gets various signals.\n\
5908 Specify a signal as argument to print info on that signal only."));
5909 add_com ("z", class_run, xdb_handle_command, _("\
5910 Specify how to handle a signal.\n\
5911 Args are signals and actions to apply to those signals.\n\
5912 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
5913 from 1-15 are allowed for compatibility with old versions of GDB.\n\
5914 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
5915 The special arg \"all\" is recognized to mean all signals except those\n\
5916 used by the debugger, typically SIGTRAP and SIGINT.\n\
5917 Recognized actions include \"s\" (toggles between stop and nostop), \n\
5918 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
5919 nopass), \"Q\" (noprint)\n\
5920 Stop means reenter debugger if this signal happens (implies print).\n\
5921 Print means print a message if this signal happens.\n\
5922 Pass means let program see this signal; otherwise program doesn't know.\n\
5923 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
5924 Pass and Stop may be combined."));
5925 }
5926
5927 if (!dbx_commands)
5928 stop_command = add_cmd ("stop", class_obscure,
5929 not_just_help_class_command, _("\
5930 There is no `stop' command, but you can set a hook on `stop'.\n\
5931 This allows you to set a list of commands to be run each time execution\n\
5932 of the program stops."), &cmdlist);
5933
5934 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
5935 Set inferior debugging."), _("\
5936 Show inferior debugging."), _("\
5937 When non-zero, inferior specific debugging is enabled."),
5938 NULL,
5939 show_debug_infrun,
5940 &setdebuglist, &showdebuglist);
5941
5942 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
5943 Set displaced stepping debugging."), _("\
5944 Show displaced stepping debugging."), _("\
5945 When non-zero, displaced stepping specific debugging is enabled."),
5946 NULL,
5947 show_debug_displaced,
5948 &setdebuglist, &showdebuglist);
5949
5950 add_setshow_boolean_cmd ("non-stop", no_class,
5951 &non_stop_1, _("\
5952 Set whether gdb controls the inferior in non-stop mode."), _("\
5953 Show whether gdb controls the inferior in non-stop mode."), _("\
5954 When debugging a multi-threaded program and this setting is\n\
5955 off (the default, also called all-stop mode), when one thread stops\n\
5956 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
5957 all other threads in the program while you interact with the thread of\n\
5958 interest. When you continue or step a thread, you can allow the other\n\
5959 threads to run, or have them remain stopped, but while you inspect any\n\
5960 thread's state, all threads stop.\n\
5961 \n\
5962 In non-stop mode, when one thread stops, other threads can continue\n\
5963 to run freely. You'll be able to step each thread independently,\n\
5964 leave it stopped or free to run as needed."),
5965 set_non_stop,
5966 show_non_stop,
5967 &setlist,
5968 &showlist);
5969
5970 numsigs = (int) TARGET_SIGNAL_LAST;
5971 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
5972 signal_print = (unsigned char *)
5973 xmalloc (sizeof (signal_print[0]) * numsigs);
5974 signal_program = (unsigned char *)
5975 xmalloc (sizeof (signal_program[0]) * numsigs);
5976 for (i = 0; i < numsigs; i++)
5977 {
5978 signal_stop[i] = 1;
5979 signal_print[i] = 1;
5980 signal_program[i] = 1;
5981 }
5982
5983 /* Signals caused by debugger's own actions
5984 should not be given to the program afterwards. */
5985 signal_program[TARGET_SIGNAL_TRAP] = 0;
5986 signal_program[TARGET_SIGNAL_INT] = 0;
5987
5988 /* Signals that are not errors should not normally enter the debugger. */
5989 signal_stop[TARGET_SIGNAL_ALRM] = 0;
5990 signal_print[TARGET_SIGNAL_ALRM] = 0;
5991 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
5992 signal_print[TARGET_SIGNAL_VTALRM] = 0;
5993 signal_stop[TARGET_SIGNAL_PROF] = 0;
5994 signal_print[TARGET_SIGNAL_PROF] = 0;
5995 signal_stop[TARGET_SIGNAL_CHLD] = 0;
5996 signal_print[TARGET_SIGNAL_CHLD] = 0;
5997 signal_stop[TARGET_SIGNAL_IO] = 0;
5998 signal_print[TARGET_SIGNAL_IO] = 0;
5999 signal_stop[TARGET_SIGNAL_POLL] = 0;
6000 signal_print[TARGET_SIGNAL_POLL] = 0;
6001 signal_stop[TARGET_SIGNAL_URG] = 0;
6002 signal_print[TARGET_SIGNAL_URG] = 0;
6003 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6004 signal_print[TARGET_SIGNAL_WINCH] = 0;
6005
6006 /* These signals are used internally by user-level thread
6007 implementations. (See signal(5) on Solaris.) Like the above
6008 signals, a healthy program receives and handles them as part of
6009 its normal operation. */
6010 signal_stop[TARGET_SIGNAL_LWP] = 0;
6011 signal_print[TARGET_SIGNAL_LWP] = 0;
6012 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6013 signal_print[TARGET_SIGNAL_WAITING] = 0;
6014 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6015 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6016
6017 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6018 &stop_on_solib_events, _("\
6019 Set stopping for shared library events."), _("\
6020 Show stopping for shared library events."), _("\
6021 If nonzero, gdb will give control to the user when the dynamic linker\n\
6022 notifies gdb of shared library events. The most common event of interest\n\
6023 to the user would be loading/unloading of a new library."),
6024 NULL,
6025 show_stop_on_solib_events,
6026 &setlist, &showlist);
6027
6028 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6029 follow_fork_mode_kind_names,
6030 &follow_fork_mode_string, _("\
6031 Set debugger response to a program call of fork or vfork."), _("\
6032 Show debugger response to a program call of fork or vfork."), _("\
6033 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6034 parent - the original process is debugged after a fork\n\
6035 child - the new process is debugged after a fork\n\
6036 The unfollowed process will continue to run.\n\
6037 By default, the debugger will follow the parent process."),
6038 NULL,
6039 show_follow_fork_mode_string,
6040 &setlist, &showlist);
6041
6042 add_setshow_enum_cmd ("scheduler-locking", class_run,
6043 scheduler_enums, &scheduler_mode, _("\
6044 Set mode for locking scheduler during execution."), _("\
6045 Show mode for locking scheduler during execution."), _("\
6046 off == no locking (threads may preempt at any time)\n\
6047 on == full locking (no thread except the current thread may run)\n\
6048 step == scheduler locked during every single-step operation.\n\
6049 In this mode, no other thread may run during a step command.\n\
6050 Other threads may run while stepping over a function call ('next')."),
6051 set_schedlock_func, /* traps on target vector */
6052 show_scheduler_mode,
6053 &setlist, &showlist);
6054
6055 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6056 Set mode for resuming threads of all processes."), _("\
6057 Show mode for resuming threads of all processes."), _("\
6058 When on, execution commands (such as 'continue' or 'next') resume all\n\
6059 threads of all processes. When off (which is the default), execution\n\
6060 commands only resume the threads of the current process. The set of\n\
6061 threads that are resumed is further refined by the scheduler-locking\n\
6062 mode (see help set scheduler-locking)."),
6063 NULL,
6064 show_schedule_multiple,
6065 &setlist, &showlist);
6066
6067 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6068 Set mode of the step operation."), _("\
6069 Show mode of the step operation."), _("\
6070 When set, doing a step over a function without debug line information\n\
6071 will stop at the first instruction of that function. Otherwise, the\n\
6072 function is skipped and the step command stops at a different source line."),
6073 NULL,
6074 show_step_stop_if_no_debug,
6075 &setlist, &showlist);
6076
6077 add_setshow_enum_cmd ("displaced-stepping", class_run,
6078 can_use_displaced_stepping_enum,
6079 &can_use_displaced_stepping, _("\
6080 Set debugger's willingness to use displaced stepping."), _("\
6081 Show debugger's willingness to use displaced stepping."), _("\
6082 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6083 supported by the target architecture. If off, gdb will not use displaced\n\
6084 stepping to step over breakpoints, even if such is supported by the target\n\
6085 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6086 if the target architecture supports it and non-stop mode is active, but will not\n\
6087 use it in all-stop mode (see help set non-stop)."),
6088 NULL,
6089 show_can_use_displaced_stepping,
6090 &setlist, &showlist);
6091
6092 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6093 &exec_direction, _("Set direction of execution.\n\
6094 Options are 'forward' or 'reverse'."),
6095 _("Show direction of execution (forward/reverse)."),
6096 _("Tells gdb whether to execute forward or backward."),
6097 set_exec_direction_func, show_exec_direction_func,
6098 &setlist, &showlist);
6099
6100 /* ptid initializations */
6101 null_ptid = ptid_build (0, 0, 0);
6102 minus_one_ptid = ptid_build (-1, 0, 0);
6103 inferior_ptid = null_ptid;
6104 target_last_wait_ptid = minus_one_ptid;
6105 displaced_step_ptid = null_ptid;
6106
6107 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6108 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6109 observer_attach_thread_exit (infrun_thread_thread_exit);
6110
6111 /* Explicitly create without lookup, since that tries to create a
6112 void-typed value, and when we get here, gdbarch
6113 isn't initialized yet. At this point, we're quite sure there
6114 isn't another convenience variable of the same name. */
6115 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6116 }