1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54
55 /* Prototypes for local functions */
56
57 static void signals_info (char *, int);
58
59 static void handle_command (char *, int);
60
61 static void sig_print_info (enum target_signal);
62
63 static void sig_print_header (void);
64
65 static void resume_cleanups (void *);
66
67 static int hook_stop_stub (void *);
68
69 static int restore_selected_frame (void *);
70
71 static void build_infrun (void);
72
73 static int follow_fork (void);
74
75 static void set_schedlock_func (char *args, int from_tty,
76 struct cmd_list_element *c);
77
78 static int currently_stepping (struct thread_info *tp);
79
80 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
81 void *data);
82
83 static void xdb_handle_command (char *args, int from_tty);
84
85 static int prepare_to_proceed (int);
86
87 void _initialize_infrun (void);
88
89 void nullify_last_target_wait_ptid (void);
90
91 /* When set, stop the 'step' command if we enter a function which has
92 no line number information. The normal behavior is that we step
93 over such functions. */
94 int step_stop_if_no_debug = 0;
95 static void
96 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
97 struct cmd_list_element *c, const char *value)
98 {
99 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
100 }
101
102 /* In asynchronous mode, but simulating synchronous execution. */
103
104 int sync_execution = 0;
105
106 /* wait_for_inferior and normal_stop use this to notify the user
107 when the inferior stopped in a different thread than it had been
108 running in. */
109
110 static ptid_t previous_inferior_ptid;
111
112 int debug_displaced = 0;
113 static void
114 show_debug_displaced (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
118 }
119
120 static int debug_infrun = 0;
121 static void
122 show_debug_infrun (struct ui_file *file, int from_tty,
123 struct cmd_list_element *c, const char *value)
124 {
125 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
126 }
127
128 /* If the program uses ELF-style shared libraries, then calls to
129 functions in shared libraries go through stubs, which live in a
130 table called the PLT (Procedure Linkage Table). The first time the
131 function is called, the stub sends control to the dynamic linker,
132 which looks up the function's real address, patches the stub so
133 that future calls will go directly to the function, and then passes
134 control to the function.
135
136 If we are stepping at the source level, we don't want to see any of
137 this --- we just want to skip over the stub and the dynamic linker.
138 The simple approach is to single-step until control leaves the
139 dynamic linker.
140
141 However, on some systems (e.g., Red Hat's 5.2 distribution) the
142 dynamic linker calls functions in the shared C library, so you
143 can't tell from the PC alone whether the dynamic linker is still
144 running. In this case, we use a step-resume breakpoint to get us
145 past the dynamic linker, as if we were using "next" to step over a
146 function call.
147
148 in_solib_dynsym_resolve_code() says whether we're in the dynamic
149 linker code or not. Normally, this means we single-step. However,
150 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
151 address where we can place a step-resume breakpoint to get past the
152 linker's symbol resolution function.
153
154 in_solib_dynsym_resolve_code() can generally be implemented in a
155 pretty portable way, by comparing the PC against the address ranges
156 of the dynamic linker's sections.
157
158 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
159 it depends on internal details of the dynamic linker. It's usually
160 not too hard to figure out where to put a breakpoint, but it
161 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
162 sanity checking. If it can't figure things out, returning zero and
163 getting the (possibly confusing) stepping behavior is better than
164 signalling an error, which will obscure the change in the
165 inferior's state. */
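
/* Illustrative sketch (hypothetical helper, not used elsewhere in this
   file): the decision just described, written out.  It assumes the
   gdbarch method gdbarch_skip_solib_resolver is the modern spelling of
   SKIP_SOLIB_RESOLVER.  Given a stop PC, return 0 to keep
   single-stepping, or a non-zero address where a step-resume
   breakpoint could be placed to get past the linker's resolver.  */

static CORE_ADDR
example_skip_resolver_address (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  /* Not in the dynamic linker at all -- nothing special to do.  */
  if (!in_solib_dynsym_resolve_code (pc))
    return 0;

  /* Ask the architecture whether it knows how to hop over the
     resolver; zero means "don't know", in which case we fall back to
     single-stepping until control leaves the dynamic linker.  */
  return gdbarch_skip_solib_resolver (gdbarch, pc);
}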
166
167 /* This function returns TRUE if pc is the address of an instruction
168 that lies within the dynamic linker (such as the event hook, or the
169 dld itself).
170
171 This function must be used only when a dynamic linker event has
172 been caught, and the inferior is being stepped out of the hook, or
173 undefined results are guaranteed. */
174
175 #ifndef SOLIB_IN_DYNAMIC_LINKER
176 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
177 #endif
178
179
180 /* Convert the #defines into values. This is temporary until wfi control
181 flow is completely sorted out. */
182
183 #ifndef CANNOT_STEP_HW_WATCHPOINTS
184 #define CANNOT_STEP_HW_WATCHPOINTS 0
185 #else
186 #undef CANNOT_STEP_HW_WATCHPOINTS
187 #define CANNOT_STEP_HW_WATCHPOINTS 1
188 #endif
189
190 /* Tables of how to react to signals; the user sets them. */
191
192 static unsigned char *signal_stop;
193 static unsigned char *signal_print;
194 static unsigned char *signal_program;
195
196 #define SET_SIGS(nsigs,sigs,flags) \
197 do { \
198 int signum = (nsigs); \
199 while (signum-- > 0) \
200 if ((sigs)[signum]) \
201 (flags)[signum] = 1; \
202 } while (0)
203
204 #define UNSET_SIGS(nsigs,sigs,flags) \
205 do { \
206 int signum = (nsigs); \
207 while (signum-- > 0) \
208 if ((sigs)[signum]) \
209 (flags)[signum] = 0; \
210 } while (0)
211
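/* Usage sketch (hypothetical, not called anywhere): roughly what the
   "handle" command does with these macros.  It assumes the signal_*
   tables have already been allocated, as _initialize_infrun does.
   Mark SIGINT and SIGALRM in a scratch array, then fold that mask
   into the disposition tables.  */

static void
example_handle_stop_print (void)
{
  int nsigs = (int) TARGET_SIGNAL_LAST;
  unsigned char *sigs = (unsigned char *) alloca (nsigs);

  memset (sigs, 0, nsigs);
  sigs[TARGET_SIGNAL_INT] = 1;
  sigs[TARGET_SIGNAL_ALRM] = 1;

  /* Stop for and print these signals...  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
  /* ... and do not pass them to the program.  */
  UNSET_SIGS (nsigs, sigs, signal_program);
}
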
212 /* Value to pass to target_resume() to cause all threads to resume. */
213
214 #define RESUME_ALL minus_one_ptid
215
216 /* Command list pointer for the "stop" placeholder. */
217
218 static struct cmd_list_element *stop_command;
219
220 /* Function inferior was in as of last step command. */
221
222 static struct symbol *step_start_function;
223
224 /* Nonzero if we want to give control to the user when we're notified
225 of shared library events by the dynamic linker. */
226 static int stop_on_solib_events;
227 static void
228 show_stop_on_solib_events (struct ui_file *file, int from_tty,
229 struct cmd_list_element *c, const char *value)
230 {
231 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
232 value);
233 }
234
235 /* Nonzero means we are expecting a trace trap
236 and should stop the inferior and return silently when it happens. */
237
238 int stop_after_trap;
239
240 /* Save register contents here when executing a "finish" command or when
241 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
242 Thus this contains the return value from the called function (assuming
243 values are returned in a register). */
244
245 struct regcache *stop_registers;
246
247 /* Nonzero after stop if current stack frame should be printed. */
248
249 static int stop_print_frame;
250
251 /* This is a cached copy of the pid/waitstatus of the last event
252 returned by target_wait()/deprecated_target_wait_hook(). This
253 information is returned by get_last_target_status(). */
254 static ptid_t target_last_wait_ptid;
255 static struct target_waitstatus target_last_waitstatus;
256
257 static void context_switch (ptid_t ptid);
258
259 void init_thread_stepping_state (struct thread_info *tss);
260
261 void init_infwait_state (void);
262
263 static const char follow_fork_mode_child[] = "child";
264 static const char follow_fork_mode_parent[] = "parent";
265
266 static const char *follow_fork_mode_kind_names[] = {
267 follow_fork_mode_child,
268 follow_fork_mode_parent,
269 NULL
270 };
271
272 static const char *follow_fork_mode_string = follow_fork_mode_parent;
273 static void
274 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
275 struct cmd_list_element *c, const char *value)
276 {
277 fprintf_filtered (file, _("\
278 Debugger response to a program call of fork or vfork is \"%s\".\n"),
279 value);
280 }
281 \f
282
283 /* Tell the target to follow the fork we're stopped at. Returns true
284 if the inferior should be resumed; false if the target for some
285 reason decided it's best not to resume. */
286
287 static int
288 follow_fork (void)
289 {
290 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
291 int should_resume = 1;
292 struct thread_info *tp;
293
294 /* Copy user stepping state to the new inferior thread. FIXME: the
295 followed fork child thread should have a copy of most of the
296 parent thread structure's run control related fields, not just these.
297 Initialized to avoid "may be used uninitialized" warnings from gcc. */
298 struct breakpoint *step_resume_breakpoint = NULL;
299 CORE_ADDR step_range_start = 0;
300 CORE_ADDR step_range_end = 0;
301 struct frame_id step_frame_id = { 0 };
302
303 if (!non_stop)
304 {
305 ptid_t wait_ptid;
306 struct target_waitstatus wait_status;
307
308 /* Get the last target status returned by target_wait(). */
309 get_last_target_status (&wait_ptid, &wait_status);
310
311 /* If not stopped at a fork event, then there's nothing else to
312 do. */
313 if (wait_status.kind != TARGET_WAITKIND_FORKED
314 && wait_status.kind != TARGET_WAITKIND_VFORKED)
315 return 1;
316
317 /* Check if we switched over from WAIT_PTID, since the event was
318 reported. */
319 if (!ptid_equal (wait_ptid, minus_one_ptid)
320 && !ptid_equal (inferior_ptid, wait_ptid))
321 {
322 /* We did. Switch back to WAIT_PTID thread, to tell the
323 target to follow it (in either direction). We'll
324 afterwards refuse to resume, and inform the user what
325 happened. */
326 switch_to_thread (wait_ptid);
327 should_resume = 0;
328 }
329 }
330
331 tp = inferior_thread ();
332
333 /* If there were any forks/vforks that were caught and are now to be
334 followed, then do so now. */
335 switch (tp->pending_follow.kind)
336 {
337 case TARGET_WAITKIND_FORKED:
338 case TARGET_WAITKIND_VFORKED:
339 {
340 ptid_t parent, child;
341
342 /* If the user did a next/step, etc, over a fork call,
343 preserve the stepping state in the fork child. */
344 if (follow_child && should_resume)
345 {
346 step_resume_breakpoint
347 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
348 step_range_start = tp->step_range_start;
349 step_range_end = tp->step_range_end;
350 step_frame_id = tp->step_frame_id;
351
352 /* For now, delete the parent's sr breakpoint, otherwise,
353 parent/child sr breakpoints are considered duplicates,
354 and the child version will not be installed. Remove
355 this when the breakpoints module becomes aware of
356 inferiors and address spaces. */
357 delete_step_resume_breakpoint (tp);
358 tp->step_range_start = 0;
359 tp->step_range_end = 0;
360 tp->step_frame_id = null_frame_id;
361 }
362
363 parent = inferior_ptid;
364 child = tp->pending_follow.value.related_pid;
365
366 /* Tell the target to do whatever is necessary to follow
367 either parent or child. */
368 if (target_follow_fork (follow_child))
369 {
370 /* Target refused to follow, or there's some other reason
371 we shouldn't resume. */
372 should_resume = 0;
373 }
374 else
375 {
376 /* This pending follow fork event is now handled, one way
377 or another. The previously selected thread may be gone
378 from the lists by now, but if it is still around, we need
379 to clear the pending follow request. */
380 tp = find_thread_ptid (parent);
381 if (tp)
382 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
383
384 /* This makes sure we don't try to apply the "Switched
385 over from WAIT_PTID" logic above. */
386 nullify_last_target_wait_ptid ();
387
388 /* If we followed the child, switch to it... */
389 if (follow_child)
390 {
391 switch_to_thread (child);
392
393 /* ... and preserve the stepping state, in case the
394 user was stepping over the fork call. */
395 if (should_resume)
396 {
397 tp = inferior_thread ();
398 tp->step_resume_breakpoint = step_resume_breakpoint;
399 tp->step_range_start = step_range_start;
400 tp->step_range_end = step_range_end;
401 tp->step_frame_id = step_frame_id;
402 }
403 else
404 {
405 /* If we get here, it was because we're trying to
406 resume from a fork catchpoint, but the user
407 has switched threads away from the thread that
408 forked. In that case, the resume command
409 issued is most likely not applicable to the
410 child, so just warn, and refuse to resume. */
411 warning (_("\
412 Not resuming: switched threads before following fork child.\n"));
413 }
414
415 /* Reset breakpoints in the child as appropriate. */
416 follow_inferior_reset_breakpoints ();
417 }
418 else
419 switch_to_thread (parent);
420 }
421 }
422 break;
423 case TARGET_WAITKIND_SPURIOUS:
424 /* Nothing to follow. */
425 break;
426 default:
427 internal_error (__FILE__, __LINE__,
428 "Unexpected pending_follow.kind %d\n",
429 tp->pending_follow.kind);
430 break;
431 }
432
433 return should_resume;
434 }
435
436 void
437 follow_inferior_reset_breakpoints (void)
438 {
439 struct thread_info *tp = inferior_thread ();
440
441 /* Was there a step_resume breakpoint? (There was if the user
442 did a "next" at the fork() call.) If so, explicitly reset its
443 thread number.
444
445 step_resumes are a form of bp that are made to be per-thread.
446 Since we created the step_resume bp when the parent process
447 was being debugged, and now are switching to the child process,
448 from the breakpoint package's viewpoint, that's a switch of
449 "threads". We must update the bp's notion of which thread
450 it is for, or it'll be ignored when it triggers. */
451
452 if (tp->step_resume_breakpoint)
453 breakpoint_re_set_thread (tp->step_resume_breakpoint);
454
455 /* Reinsert all breakpoints in the child. The user may have set
456 breakpoints after catching the fork, in which case those
457 were never set in the child, but only in the parent. This makes
458 sure the inserted breakpoints match the breakpoint list. */
459
460 breakpoint_re_set ();
461 insert_breakpoints ();
462 }
463
464 /* EXECD_PATHNAME is assumed to be non-NULL. */
465
466 static void
467 follow_exec (ptid_t pid, char *execd_pathname)
468 {
469 struct target_ops *tgt;
470 struct thread_info *th = inferior_thread ();
471
472 /* This is an exec event that we actually wish to pay attention to.
473 Refresh our symbol table to the newly exec'd program, remove any
474 momentary bp's, etc.
475
476 If there are breakpoints, they aren't really inserted now,
477 since the exec() transformed our inferior into a fresh set
478 of instructions.
479
480 We want to preserve symbolic breakpoints on the list, since
481 we have hopes that they can be reset after the new a.out's
482 symbol table is read.
483
484 However, any "raw" breakpoints must be removed from the list
485 (e.g., the solib bp's), since their address is probably invalid
486 now.
487
488 And, we DON'T want to call delete_breakpoints() here, since
489 that may write the bp's "shadow contents" (the instruction
490 value that was overwritten with a TRAP instruction). Since
491 we now have a new a.out, those shadow contents aren't valid. */
492 update_breakpoints_after_exec ();
493
494 /* If there was one, it's gone now. We cannot truly step-to-next
495 statement through an exec(). */
496 th->step_resume_breakpoint = NULL;
497 th->step_range_start = 0;
498 th->step_range_end = 0;
499
500 /* The target reports the exec event to the main thread, even if
501 some other thread does the exec, and even if the main thread was
502 already stopped --- if debugging in non-stop mode, it's possible
503 the user had the main thread held stopped in the previous image
504 --- release it now. This is the same behavior as step-over-exec
505 with scheduler-locking on in all-stop mode. */
506 th->stop_requested = 0;
507
508 /* What is this a.out's name? */
509 printf_unfiltered (_("Executing new program: %s\n"), execd_pathname);
510
511 /* We've followed the inferior through an exec. Therefore, the
512 inferior has essentially been killed & reborn. */
513
514 gdb_flush (gdb_stdout);
515
516 breakpoint_init_inferior (inf_execd);
517
518 if (gdb_sysroot && *gdb_sysroot)
519 {
520 char *name = alloca (strlen (gdb_sysroot)
521 + strlen (execd_pathname)
522 + 1);
523 strcpy (name, gdb_sysroot);
524 strcat (name, execd_pathname);
525 execd_pathname = name;
526 }
527
528 /* That a.out is now the one to use. */
529 exec_file_attach (execd_pathname, 0);
530
531 /* Reset the shared library package. This ensures that we get a
532 shlib event when the child reaches "_start", at which point the
533 dld will have had a chance to initialize the child. */
534 /* Also, loading a symbol file below may trigger symbol lookups, and
535 we don't want those to be satisfied by the libraries of the
536 previous incarnation of this process. */
537 no_shared_libraries (NULL, 0);
538
539 /* Load the main file's symbols. */
540 symbol_file_add_main (execd_pathname, 0);
541
542 #ifdef SOLIB_CREATE_INFERIOR_HOOK
543 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
544 #else
545 solib_create_inferior_hook ();
546 #endif
547
548 jit_inferior_created_hook ();
549
550 /* Reinsert all breakpoints. (Those which were symbolic have
551 been reset to the proper address in the new a.out, thanks
552 to symbol_file_command...) */
553 insert_breakpoints ();
554
555 /* The next resume of this inferior should bring it to the shlib
556 startup breakpoints. (If the user had also set bp's on
557 "main" from the old (parent) process, then they'll auto-
558 matically get reset there in the new process.) */
559 }
560
561 /* Non-zero if we are just simulating a single-step. This is needed
562 because we cannot remove the breakpoints in the inferior process
563 until after the `wait' in `wait_for_inferior'. */
564 static int singlestep_breakpoints_inserted_p = 0;
565
566 /* The thread we inserted single-step breakpoints for. */
567 static ptid_t singlestep_ptid;
568
569 /* PC when we started this single-step. */
570 static CORE_ADDR singlestep_pc;
571
572 /* If another thread hit the singlestep breakpoint, we save the original
573 thread here so that we can resume single-stepping it later. */
574 static ptid_t saved_singlestep_ptid;
575 static int stepping_past_singlestep_breakpoint;
576
577 /* If not equal to null_ptid, this means that after stepping over a breakpoint
578 is finished, we need to switch to deferred_step_ptid, and step it.
579
580 The use case is when one thread has hit a breakpoint, and then the user
581 has switched to another thread and issued 'step'. We need to step over
582 the breakpoint in the thread which hit it, but then continue
583 stepping the thread the user has selected. */
584 static ptid_t deferred_step_ptid;
585 \f
586 /* Displaced stepping. */
587
588 /* In non-stop debugging mode, we must take special care to manage
589 breakpoints properly; in particular, the traditional strategy for
590 stepping a thread past a breakpoint it has hit is unsuitable.
591 'Displaced stepping' is a tactic for stepping one thread past a
592 breakpoint it has hit while ensuring that other threads running
593 concurrently will hit the breakpoint as they should.
594
595 The traditional way to step a thread T off a breakpoint in a
596 multi-threaded program in all-stop mode is as follows:
597
598 a0) Initially, all threads are stopped, and breakpoints are not
599 inserted.
600 a1) We single-step T, leaving breakpoints uninserted.
601 a2) We insert breakpoints, and resume all threads.
602
603 In non-stop debugging, however, this strategy is unsuitable: we
604 don't want to have to stop all threads in the system in order to
605 continue or step T past a breakpoint. Instead, we use displaced
606 stepping:
607
608 n0) Initially, T is stopped, other threads are running, and
609 breakpoints are inserted.
610 n1) We copy the instruction "under" the breakpoint to a separate
611 location, outside the main code stream, making any adjustments
612 to the instruction, register, and memory state as directed by
613 T's architecture.
614 n2) We single-step T over the instruction at its new location.
615 n3) We adjust the resulting register and memory state as directed
616 by T's architecture. This includes resetting T's PC to point
617 back into the main instruction stream.
618 n4) We resume T.
619
620 This approach depends on the following gdbarch methods:
621
622 - gdbarch_max_insn_length and gdbarch_displaced_step_location
623 indicate where to copy the instruction, and how much space must
624 be reserved there. We use these in step n1.
625
626 - gdbarch_displaced_step_copy_insn copies an instruction to a new
627 address, and makes any necessary adjustments to the instruction,
628 register contents, and memory. We use this in step n1.
629
630 - gdbarch_displaced_step_fixup adjusts registers and memory after
631 we have successfully single-stepped the instruction, to yield the
632 same effect the instruction would have had if we had executed it
633 at its original address. We use this in step n3.
634
635 - gdbarch_displaced_step_free_closure provides cleanup.
636
637 The gdbarch_displaced_step_copy_insn and
638 gdbarch_displaced_step_fixup functions must be written so that
639 copying an instruction with gdbarch_displaced_step_copy_insn,
640 single-stepping across the copied instruction, and then applying
641 gdbarch_displaced_step_fixup should have the same effects on the
642 thread's memory and registers as stepping the instruction in place
643 would have. Exactly which responsibilities fall to the copy and
644 which fall to the fixup is up to the author of those functions.
645
646 See the comments in gdbarch.sh for details.
647
648 Note that displaced stepping and software single-step cannot
649 currently be used in combination, although with some care I think
650 they could be made to. Software single-step works by placing
651 breakpoints on all possible subsequent instructions; if the
652 displaced instruction is a PC-relative jump, those breakpoints
653 could fall in very strange places --- on pages that aren't
654 executable, or at addresses that are not proper instruction
655 boundaries. (We do generally let other threads run while we wait
656 to hit the software single-step breakpoint, and they might
657 encounter such a corrupted instruction.) One way to work around
658 this would be to have gdbarch_displaced_step_copy_insn fully
659 simulate the effect of PC-relative instructions (and return NULL)
660 on architectures that use software single-stepping.
661
662 In non-stop mode, we can have independent and simultaneous step
663 requests, so more than one thread may need to simultaneously step
664 over a breakpoint. The current implementation assumes there is
665 only one scratch space per process. In this case, we have to
666 serialize access to the scratch space. If thread A wants to step
667 over a breakpoint, but we are currently waiting for some other
668 thread to complete a displaced step, we leave thread A stopped and
669 place it in the displaced_step_request_queue. Whenever a displaced
670 step finishes, we pick the next thread in the queue and start a new
671 displaced step operation on it. See displaced_step_prepare and
672 displaced_step_fixup for details. */
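
/* A condensed, single-thread illustration of steps n1-n4 above, using
   the gdbarch methods just listed.  This is only a sketch (and is not
   called anywhere): the real implementation lives in
   displaced_step_prepare and displaced_step_fixup below, which also
   handle request queuing, cleanups, debug output, and steps that end
   with something other than SIGTRAP.  */

static void
example_displaced_step_sequence (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  /* n1: save the scratch area, then copy the displaced instruction
     there, letting the architecture adjust it as needed.  */
  read_memory (copy, saved, len);
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: point the thread at the copy and single-step it.  */
  regcache_write_pc (regcache, copy);
  target_resume (inferior_ptid, 1, TARGET_SIGNAL_0);
  /* ... target_wait eventually reports the step as complete ...  */

  /* n3: restore the scratch area, then let the architecture rewrite
     registers/memory so the net effect is as if the instruction had
     executed at ORIGINAL.  */
  write_memory (copy, saved, len);
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);

  /* n4 would resume the thread normally; here we only clean up.  */
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  xfree (saved);
}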
673
674 /* If this is not null_ptid, this is the thread carrying out a
675 displaced single-step. This thread's state will require fixing up
676 once it has completed its step. */
677 static ptid_t displaced_step_ptid;
678
679 struct displaced_step_request
680 {
681 ptid_t ptid;
682 struct displaced_step_request *next;
683 };
684
685 /* A queue of pending displaced stepping requests. */
686 struct displaced_step_request *displaced_step_request_queue;
687
688 /* The architecture the thread had when we stepped it. */
689 static struct gdbarch *displaced_step_gdbarch;
690
691 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
692 for post-step cleanup. */
693 static struct displaced_step_closure *displaced_step_closure;
694
695 /* The address of the original instruction, and the copy we made. */
696 static CORE_ADDR displaced_step_original, displaced_step_copy;
697
698 /* Saved contents of copy area. */
699 static gdb_byte *displaced_step_saved_copy;
700
701 /* Enum strings for "set|show displaced-stepping". */
702
703 static const char can_use_displaced_stepping_auto[] = "auto";
704 static const char can_use_displaced_stepping_on[] = "on";
705 static const char can_use_displaced_stepping_off[] = "off";
706 static const char *can_use_displaced_stepping_enum[] =
707 {
708 can_use_displaced_stepping_auto,
709 can_use_displaced_stepping_on,
710 can_use_displaced_stepping_off,
711 NULL,
712 };
713
714 /* If ON, and the architecture supports it, GDB will use displaced
715 stepping to step over breakpoints. If OFF, or if the architecture
716 doesn't support it, GDB will instead use the traditional
717 hold-and-step approach. If AUTO (which is the default), GDB will
718 decide which technique to use to step over breakpoints depending on
719 which of all-stop or non-stop mode is active --- displaced stepping
720 in non-stop mode; hold-and-step in all-stop mode. */
721
722 static const char *can_use_displaced_stepping =
723 can_use_displaced_stepping_auto;
724
725 static void
726 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
727 struct cmd_list_element *c,
728 const char *value)
729 {
730 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
731 fprintf_filtered (file, _("\
732 Debugger's willingness to use displaced stepping to step over \
733 breakpoints is %s (currently %s).\n"),
734 value, non_stop ? "on" : "off");
735 else
736 fprintf_filtered (file, _("\
737 Debugger's willingness to use displaced stepping to step over \
738 breakpoints is %s.\n"), value);
739 }
740
741 /* Return non-zero if displaced stepping can/should be used to step
742 over breakpoints. */
743
744 static int
745 use_displaced_stepping (struct gdbarch *gdbarch)
746 {
747 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
748 && non_stop)
749 || can_use_displaced_stepping == can_use_displaced_stepping_on)
750 && gdbarch_displaced_step_copy_insn_p (gdbarch)
751 && !RECORD_IS_USED);
752 }
753
754 /* Clean out any stray displaced stepping state. */
755 static void
756 displaced_step_clear (void)
757 {
758 /* Indicate that there is no cleanup pending. */
759 displaced_step_ptid = null_ptid;
760
761 if (displaced_step_closure)
762 {
763 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
764 displaced_step_closure);
765 displaced_step_closure = NULL;
766 }
767 }
768
769 static void
770 displaced_step_clear_cleanup (void *ignore)
771 {
772 displaced_step_clear ();
773 }
774
775 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
776 void
777 displaced_step_dump_bytes (struct ui_file *file,
778 const gdb_byte *buf,
779 size_t len)
780 {
781 int i;
782
783 for (i = 0; i < len; i++)
784 fprintf_unfiltered (file, "%02x ", buf[i]);
785 fputs_unfiltered ("\n", file);
786 }
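
/* For example, with a hypothetical four-byte buffer:

     gdb_byte insn[] = { 0x55, 0x48, 0x89, 0xe5 };
     displaced_step_dump_bytes (gdb_stdlog, insn, sizeof (insn));

   this writes "55 48 89 e5 " followed by a newline to the log.  */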
787
788 /* Prepare to single-step, using displaced stepping.
789
790 Note that we cannot use displaced stepping when we have a signal to
791 deliver. If we have a signal to deliver and an instruction to step
792 over, then after the step, there will be no indication from the
793 target whether the thread entered a signal handler or ignored the
794 signal and stepped over the instruction successfully --- both cases
795 result in a simple SIGTRAP. In the first case we mustn't do a
796 fixup, and in the second case we must --- but we can't tell which.
797 Comments in the code for 'random signals' in handle_inferior_event
798 explain how we handle this case instead.
799
800 Returns 1 if preparing was successful -- this thread is going to be
801 stepped now; or 0 if displaced stepping this thread got queued. */
802 static int
803 displaced_step_prepare (ptid_t ptid)
804 {
805 struct cleanup *old_cleanups, *ignore_cleanups;
806 struct regcache *regcache = get_thread_regcache (ptid);
807 struct gdbarch *gdbarch = get_regcache_arch (regcache);
808 CORE_ADDR original, copy;
809 ULONGEST len;
810 struct displaced_step_closure *closure;
811
812 /* We should never reach this function if the architecture does not
813 support displaced stepping. */
814 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
815
816 /* For the first cut, we're displaced stepping one thread at a
817 time. */
818
819 if (!ptid_equal (displaced_step_ptid, null_ptid))
820 {
821 /* Already waiting for a displaced step to finish. Defer this
822 request and place it in the queue. */
823 struct displaced_step_request *req, *new_req;
824
825 if (debug_displaced)
826 fprintf_unfiltered (gdb_stdlog,
827 "displaced: defering step of %s\n",
828 target_pid_to_str (ptid));
829
830 new_req = xmalloc (sizeof (*new_req));
831 new_req->ptid = ptid;
832 new_req->next = NULL;
833
834 if (displaced_step_request_queue)
835 {
836 for (req = displaced_step_request_queue;
837 req && req->next;
838 req = req->next)
839 ;
840 req->next = new_req;
841 }
842 else
843 displaced_step_request_queue = new_req;
844
845 return 0;
846 }
847 else
848 {
849 if (debug_displaced)
850 fprintf_unfiltered (gdb_stdlog,
851 "displaced: stepping %s now\n",
852 target_pid_to_str (ptid));
853 }
854
855 displaced_step_clear ();
856
857 old_cleanups = save_inferior_ptid ();
858 inferior_ptid = ptid;
859
860 original = regcache_read_pc (regcache);
861
862 copy = gdbarch_displaced_step_location (gdbarch);
863 len = gdbarch_max_insn_length (gdbarch);
864
865 /* Save the original contents of the copy area. */
866 displaced_step_saved_copy = xmalloc (len);
867 ignore_cleanups = make_cleanup (free_current_contents,
868 &displaced_step_saved_copy);
869 read_memory (copy, displaced_step_saved_copy, len);
870 if (debug_displaced)
871 {
872 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
873 paddress (gdbarch, copy));
874 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
875 }
876
877 closure = gdbarch_displaced_step_copy_insn (gdbarch,
878 original, copy, regcache);
879
880 /* We don't support the fully-simulated case at present. */
881 gdb_assert (closure);
882
883 /* Save the information we need to fix things up if the step
884 succeeds. */
885 displaced_step_ptid = ptid;
886 displaced_step_gdbarch = gdbarch;
887 displaced_step_closure = closure;
888 displaced_step_original = original;
889 displaced_step_copy = copy;
890
891 make_cleanup (displaced_step_clear_cleanup, 0);
892
893 /* Resume execution at the copy. */
894 regcache_write_pc (regcache, copy);
895
896 discard_cleanups (ignore_cleanups);
897
898 do_cleanups (old_cleanups);
899
900 if (debug_displaced)
901 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
902 paddress (gdbarch, copy));
903
904 return 1;
905 }
906
907 static void
908 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
909 {
910 struct cleanup *ptid_cleanup = save_inferior_ptid ();
911 inferior_ptid = ptid;
912 write_memory (memaddr, myaddr, len);
913 do_cleanups (ptid_cleanup);
914 }
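
/* The same save/switch/restore idiom works for reads.  An illustrative
   counterpart (hypothetical, not used in this file):  */

static void
example_read_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
                          gdb_byte *myaddr, int len)
{
  struct cleanup *ptid_cleanup = save_inferior_ptid ();
  inferior_ptid = ptid;
  read_memory (memaddr, myaddr, len);
  do_cleanups (ptid_cleanup);
}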
915
916 static void
917 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
918 {
919 struct cleanup *old_cleanups;
920
921 /* Was this event for the pid we displaced? */
922 if (ptid_equal (displaced_step_ptid, null_ptid)
923 || ! ptid_equal (displaced_step_ptid, event_ptid))
924 return;
925
926 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
927
928 /* Restore the contents of the copy area. */
929 {
930 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
931 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
932 displaced_step_saved_copy, len);
933 if (debug_displaced)
934 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
935 paddress (displaced_step_gdbarch,
936 displaced_step_copy));
937 }
938
939 /* Did the instruction complete successfully? */
940 if (signal == TARGET_SIGNAL_TRAP)
941 {
942 /* Fix up the resulting state. */
943 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
944 displaced_step_closure,
945 displaced_step_original,
946 displaced_step_copy,
947 get_thread_regcache (displaced_step_ptid));
948 }
949 else
950 {
951 /* Since the instruction didn't complete, all we can do is
952 relocate the PC. */
953 struct regcache *regcache = get_thread_regcache (event_ptid);
954 CORE_ADDR pc = regcache_read_pc (regcache);
955 pc = displaced_step_original + (pc - displaced_step_copy);
956 regcache_write_pc (regcache, pc);
957 }
958
959 do_cleanups (old_cleanups);
960
961 displaced_step_ptid = null_ptid;
962
963 /* Are there any pending displaced stepping requests? If so, run
964 one now. */
965 while (displaced_step_request_queue)
966 {
967 struct displaced_step_request *head;
968 ptid_t ptid;
969 struct regcache *regcache;
970 struct gdbarch *gdbarch;
971 CORE_ADDR actual_pc;
972
973 head = displaced_step_request_queue;
974 ptid = head->ptid;
975 displaced_step_request_queue = head->next;
976 xfree (head);
977
978 context_switch (ptid);
979
980 regcache = get_thread_regcache (ptid);
981 actual_pc = regcache_read_pc (regcache);
982
983 if (breakpoint_here_p (actual_pc))
984 {
985 if (debug_displaced)
986 fprintf_unfiltered (gdb_stdlog,
987 "displaced: stepping queued %s now\n",
988 target_pid_to_str (ptid));
989
990 displaced_step_prepare (ptid);
991
992 gdbarch = get_regcache_arch (regcache);
993
994 if (debug_displaced)
995 {
996 CORE_ADDR actual_pc = regcache_read_pc (regcache);
997 gdb_byte buf[4];
998
999 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1000 paddress (gdbarch, actual_pc));
1001 read_memory (actual_pc, buf, sizeof (buf));
1002 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1003 }
1004
1005 if (gdbarch_displaced_step_hw_singlestep
1006 (gdbarch, displaced_step_closure))
1007 target_resume (ptid, 1, TARGET_SIGNAL_0);
1008 else
1009 target_resume (ptid, 0, TARGET_SIGNAL_0);
1010
1011 /* Done, we're stepping a thread. */
1012 break;
1013 }
1014 else
1015 {
1016 int step;
1017 struct thread_info *tp = inferior_thread ();
1018
1019 /* The breakpoint we were sitting under has since been
1020 removed. */
1021 tp->trap_expected = 0;
1022
1023 /* Go back to what we were trying to do. */
1024 step = currently_stepping (tp);
1025
1026 if (debug_displaced)
1027 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1028 target_pid_to_str (tp->ptid), step);
1029
1030 target_resume (ptid, step, TARGET_SIGNAL_0);
1031 tp->stop_signal = TARGET_SIGNAL_0;
1032
1033 /* This request was discarded. See if there's any other
1034 thread waiting for its turn. */
1035 }
1036 }
1037 }
1038
1039 /* Update global variables holding ptids to hold NEW_PTID if they were
1040 holding OLD_PTID. */
1041 static void
1042 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1043 {
1044 struct displaced_step_request *it;
1045
1046 if (ptid_equal (inferior_ptid, old_ptid))
1047 inferior_ptid = new_ptid;
1048
1049 if (ptid_equal (singlestep_ptid, old_ptid))
1050 singlestep_ptid = new_ptid;
1051
1052 if (ptid_equal (displaced_step_ptid, old_ptid))
1053 displaced_step_ptid = new_ptid;
1054
1055 if (ptid_equal (deferred_step_ptid, old_ptid))
1056 deferred_step_ptid = new_ptid;
1057
1058 for (it = displaced_step_request_queue; it; it = it->next)
1059 if (ptid_equal (it->ptid, old_ptid))
1060 it->ptid = new_ptid;
1061 }
1062
1063 \f
1064 /* Resuming. */
1065
1066 /* Things to clean up if we QUIT out of resume (). */
1067 static void
1068 resume_cleanups (void *ignore)
1069 {
1070 normal_stop ();
1071 }
1072
1073 static const char schedlock_off[] = "off";
1074 static const char schedlock_on[] = "on";
1075 static const char schedlock_step[] = "step";
1076 static const char *scheduler_enums[] = {
1077 schedlock_off,
1078 schedlock_on,
1079 schedlock_step,
1080 NULL
1081 };
1082 static const char *scheduler_mode = schedlock_off;
1083 static void
1084 show_scheduler_mode (struct ui_file *file, int from_tty,
1085 struct cmd_list_element *c, const char *value)
1086 {
1087 fprintf_filtered (file, _("\
1088 Mode for locking scheduler during execution is \"%s\".\n"),
1089 value);
1090 }
1091
1092 static void
1093 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1094 {
1095 if (!target_can_lock_scheduler)
1096 {
1097 scheduler_mode = schedlock_off;
1098 error (_("Target '%s' cannot support this command."), target_shortname);
1099 }
1100 }
1101
1102 /* True if execution commands resume all threads of all processes by
1103 default; otherwise, resume only threads of the current inferior
1104 process. */
1105 int sched_multi = 0;
1106
1107 /* Try to setup for software single stepping over the specified location.
1108 Return 1 if target_resume() should use hardware single step.
1109
1110 GDBARCH the current gdbarch.
1111 PC the location to step over. */
1112
1113 static int
1114 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1115 {
1116 int hw_step = 1;
1117
1118 if (gdbarch_software_single_step_p (gdbarch)
1119 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1120 {
1121 hw_step = 0;
1122 /* Do not pull these breakpoints until after a `wait' in
1123 `wait_for_inferior' */
1124 singlestep_breakpoints_inserted_p = 1;
1125 singlestep_ptid = inferior_ptid;
1126 singlestep_pc = pc;
1127 }
1128 return hw_step;
1129 }
1130
1131 /* Resume the inferior, but allow a QUIT. This is useful if the user
1132 wants to interrupt some lengthy single-stepping operation
1133 (for child processes, the SIGINT goes to the inferior, and so
1134 we get a SIGINT random_signal, but for remote debugging and perhaps
1135 other targets, that's not true).
1136
1137 STEP nonzero if we should step (zero to continue instead).
1138 SIG is the signal to give the inferior (zero for none). */
1139 void
1140 resume (int step, enum target_signal sig)
1141 {
1142 int should_resume = 1;
1143 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1144 struct regcache *regcache = get_current_regcache ();
1145 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1146 struct thread_info *tp = inferior_thread ();
1147 CORE_ADDR pc = regcache_read_pc (regcache);
1148
1149 QUIT;
1150
1151 if (debug_infrun)
1152 fprintf_unfiltered (gdb_stdlog,
1153 "infrun: resume (step=%d, signal=%d), "
1154 "trap_expected=%d\n",
1155 step, sig, tp->trap_expected);
1156
1157 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1158 over an instruction that causes a page fault without triggering
1159 a hardware watchpoint. The kernel properly notices that it shouldn't
1160 stop, because the hardware watchpoint is not triggered, but it forgets
1161 the step request and continues the program normally.
1162 Work around the problem by removing hardware watchpoints if a step is
1163 requested; GDB will check for a hardware watchpoint trigger after the
1164 step anyway. */
1165 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1166 remove_hw_watchpoints ();
1167
1168
1169 /* Normally, by the time we reach `resume', the breakpoints are either
1170 removed or inserted, as appropriate. The exception is if we're sitting
1171 at a permanent breakpoint; we need to step over it, but permanent
1172 breakpoints can't be removed. So we have to test for it here. */
1173 if (breakpoint_here_p (pc) == permanent_breakpoint_here)
1174 {
1175 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1176 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1177 else
1178 error (_("\
1179 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1180 how to step past a permanent breakpoint on this architecture. Try using\n\
1181 a command like `return' or `jump' to continue execution."));
1182 }
1183
1184 /* If enabled, step over breakpoints by executing a copy of the
1185 instruction at a different address.
1186
1187 We can't use displaced stepping when we have a signal to deliver;
1188 the comments for displaced_step_prepare explain why. The
1189 comments in handle_inferior_event for dealing with 'random
1190 signals' explain what we do instead. */
1191 if (use_displaced_stepping (gdbarch)
1192 && (tp->trap_expected
1193 || (step && gdbarch_software_single_step_p (gdbarch)))
1194 && sig == TARGET_SIGNAL_0)
1195 {
1196 if (!displaced_step_prepare (inferior_ptid))
1197 {
1198 /* Got placed in displaced stepping queue. Will be resumed
1199 later when all the currently queued displaced stepping
1200 requests finish. The thread is not executing at this point,
1201 and the call to set_executing will be made later. But we
1202 need to call set_running here, since from the frontend's point of view,
1203 the thread is running. */
1204 set_running (inferior_ptid, 1);
1205 discard_cleanups (old_cleanups);
1206 return;
1207 }
1208
1209 step = gdbarch_displaced_step_hw_singlestep
1210 (gdbarch, displaced_step_closure);
1211 }
1212
1213 /* Do we need to do it the hard way, w/temp breakpoints? */
1214 else if (step)
1215 step = maybe_software_singlestep (gdbarch, pc);
1216
1217 if (should_resume)
1218 {
1219 ptid_t resume_ptid;
1220
1221 /* If STEP is set, it's a request to use hardware stepping
1222 facilities. But in that case, we should never
1223 use singlestep breakpoint. */
1224 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1225
1226 /* Decide the set of threads to ask the target to resume. Start
1227 by assuming everything will be resumed, then narrow the set
1228 by applying increasingly restrictive conditions. */
1229
1230 /* By default, resume all threads of all processes. */
1231 resume_ptid = RESUME_ALL;
1232
1233 /* Maybe resume only all threads of the current process. */
1234 if (!sched_multi && target_supports_multi_process ())
1235 {
1236 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1237 }
1238
1239 /* Maybe resume a single thread after all. */
1240 if (singlestep_breakpoints_inserted_p
1241 && stepping_past_singlestep_breakpoint)
1242 {
1243 /* The situation here is as follows. In thread T1 we wanted to
1244 single-step. Lacking hardware single-stepping we've
1245 set a breakpoint at the PC of the next instruction -- call it
1246 P. After resuming, we've hit that breakpoint in thread T2.
1247 Now we've removed the original breakpoint, inserted a breakpoint
1248 at P+1, and are trying to step to advance T2 past the breakpoint.
1249 We need to step only T2, since if T1 is allowed to run freely,
1250 it can run past P, and if other threads are allowed to run,
1251 they can hit the breakpoint at P+1, and nested hits of single-step
1252 breakpoints are not something we'd want -- that's complicated
1253 to support, and has no value. */
1254 resume_ptid = inferior_ptid;
1255 }
1256 else if ((step || singlestep_breakpoints_inserted_p)
1257 && tp->trap_expected)
1258 {
1259 /* We're allowing a thread to run past a breakpoint it has
1260 hit, by single-stepping the thread with the breakpoint
1261 removed. In which case, we need to single-step only this
1262 thread, and keep others stopped, as they can miss this
1263 breakpoint if allowed to run.
1264
1265 The current code actually removes all breakpoints when
1266 doing this, not just the one being stepped over, so if we
1267 let other threads run, we can actually miss any
1268 breakpoint, not just the one at PC. */
1269 resume_ptid = inferior_ptid;
1270 }
1271 else if (non_stop)
1272 {
1273 /* With non-stop mode on, threads are always handled
1274 individually. */
1275 resume_ptid = inferior_ptid;
1276 }
1277 else if ((scheduler_mode == schedlock_on)
1278 || (scheduler_mode == schedlock_step
1279 && (step || singlestep_breakpoints_inserted_p)))
1280 {
1281 /* User-settable 'scheduler' mode requires solo thread resume. */
1282 resume_ptid = inferior_ptid;
1283 }
1284
1285 if (gdbarch_cannot_step_breakpoint (gdbarch))
1286 {
1287 /* Most targets can step a breakpoint instruction, thus
1288 executing it normally. But if this one cannot, just
1289 continue and we will hit it anyway. */
1290 if (step && breakpoint_inserted_here_p (pc))
1291 step = 0;
1292 }
1293
1294 if (debug_displaced
1295 && use_displaced_stepping (gdbarch)
1296 && tp->trap_expected)
1297 {
1298 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1299 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1300 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1301 gdb_byte buf[4];
1302
1303 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1304 paddress (resume_gdbarch, actual_pc));
1305 read_memory (actual_pc, buf, sizeof (buf));
1306 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1307 }
1308
1309 /* Install inferior's terminal modes. */
1310 target_terminal_inferior ();
1311
1312 /* Avoid confusing the next resume, if the next stop/resume
1313 happens to apply to another thread. */
1314 tp->stop_signal = TARGET_SIGNAL_0;
1315
1316 target_resume (resume_ptid, step, sig);
1317 }
1318
1319 discard_cleanups (old_cleanups);
1320 }
1321 \f
1322 /* Proceeding. */
1323
1324 /* Clear out all variables saying what to do when inferior is continued.
1325 First do this, then set the ones you want, then call `proceed'. */
1326
1327 static void
1328 clear_proceed_status_thread (struct thread_info *tp)
1329 {
1330 if (debug_infrun)
1331 fprintf_unfiltered (gdb_stdlog,
1332 "infrun: clear_proceed_status_thread (%s)\n",
1333 target_pid_to_str (tp->ptid));
1334
1335 tp->trap_expected = 0;
1336 tp->step_range_start = 0;
1337 tp->step_range_end = 0;
1338 tp->step_frame_id = null_frame_id;
1339 tp->step_stack_frame_id = null_frame_id;
1340 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1341 tp->stop_requested = 0;
1342
1343 tp->stop_step = 0;
1344
1345 tp->proceed_to_finish = 0;
1346
1347 /* Discard any remaining commands or status from previous stop. */
1348 bpstat_clear (&tp->stop_bpstat);
1349 }
1350
1351 static int
1352 clear_proceed_status_callback (struct thread_info *tp, void *data)
1353 {
1354 if (is_exited (tp->ptid))
1355 return 0;
1356
1357 clear_proceed_status_thread (tp);
1358 return 0;
1359 }
1360
1361 void
1362 clear_proceed_status (void)
1363 {
1364 if (!ptid_equal (inferior_ptid, null_ptid))
1365 {
1366 struct inferior *inferior;
1367
1368 if (non_stop)
1369 {
1370 /* If in non-stop mode, only delete the per-thread status
1371 of the current thread. */
1372 clear_proceed_status_thread (inferior_thread ());
1373 }
1374 else
1375 {
1376 /* In all-stop mode, delete the per-thread status of
1377 *all* threads. */
1378 iterate_over_threads (clear_proceed_status_callback, NULL);
1379 }
1380
1381 inferior = current_inferior ();
1382 inferior->stop_soon = NO_STOP_QUIETLY;
1383 }
1384
1385 stop_after_trap = 0;
1386
1387 observer_notify_about_to_proceed ();
1388
1389 if (stop_registers)
1390 {
1391 regcache_xfree (stop_registers);
1392 stop_registers = NULL;
1393 }
1394 }
1395
1396 /* Check the current thread against the thread that reported the most recent
1397 event. If a step-over is required, return TRUE and set the current thread
1398 to the old thread. Otherwise return FALSE.
1399
1400 This should be suitable for any targets that support threads. */
1401
1402 static int
1403 prepare_to_proceed (int step)
1404 {
1405 ptid_t wait_ptid;
1406 struct target_waitstatus wait_status;
1407 int schedlock_enabled;
1408
1409 /* With non-stop mode on, threads are always handled individually. */
1410 gdb_assert (! non_stop);
1411
1412 /* Get the last target status returned by target_wait(). */
1413 get_last_target_status (&wait_ptid, &wait_status);
1414
1415 /* Make sure we were stopped at a breakpoint. */
1416 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1417 || wait_status.value.sig != TARGET_SIGNAL_TRAP)
1418 {
1419 return 0;
1420 }
1421
1422 schedlock_enabled = (scheduler_mode == schedlock_on
1423 || (scheduler_mode == schedlock_step
1424 && step));
1425
1426 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1427 if (schedlock_enabled)
1428 return 0;
1429
1430 /* Don't switch over if we're about to resume some other process
1431 other than WAIT_PTID's, and schedule-multiple is off. */
1432 if (!sched_multi
1433 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1434 return 0;
1435
1436 /* Switched over from WAIT_PTID. */
1437 if (!ptid_equal (wait_ptid, minus_one_ptid)
1438 && !ptid_equal (inferior_ptid, wait_ptid))
1439 {
1440 struct regcache *regcache = get_thread_regcache (wait_ptid);
1441
1442 if (breakpoint_here_p (regcache_read_pc (regcache)))
1443 {
1444 /* If stepping, remember current thread to switch back to. */
1445 if (step)
1446 deferred_step_ptid = inferior_ptid;
1447
1448 /* Switch back to WAIT_PID thread. */
1449 switch_to_thread (wait_ptid);
1450
1451 /* We return 1 to indicate that there is a breakpoint here,
1452 so we need to step over it before continuing to avoid
1453 hitting it straight away. */
1454 return 1;
1455 }
1456 }
1457
1458 return 0;
1459 }
1460
1461 /* Basic routine for continuing the program in various fashions.
1462
1463 ADDR is the address to resume at, or -1 for resume where stopped.
1464 SIGGNAL is the signal to give it, or 0 for none,
1465 or -1 for act according to how it stopped.
1466 STEP is nonzero if we should trap after one instruction.
1467 -1 means return after that and print nothing.
1468 You should probably set various step_... variables
1469 before calling here, if you are stepping.
1470
1471 You should call clear_proceed_status before calling proceed. */
1472
1473 void
1474 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1475 {
1476 struct regcache *regcache;
1477 struct gdbarch *gdbarch;
1478 struct thread_info *tp;
1479 CORE_ADDR pc;
1480 int oneproc = 0;
1481
1482 /* If we're stopped at a fork/vfork, follow the branch set by the
1483 "set follow-fork-mode" command; otherwise, we'll just proceed
1484 resuming the current thread. */
1485 if (!follow_fork ())
1486 {
1487 /* The target for some reason decided not to resume. */
1488 normal_stop ();
1489 return;
1490 }
1491
1492 regcache = get_current_regcache ();
1493 gdbarch = get_regcache_arch (regcache);
1494 pc = regcache_read_pc (regcache);
1495
1496 if (step > 0)
1497 step_start_function = find_pc_function (pc);
1498 if (step < 0)
1499 stop_after_trap = 1;
1500
1501 if (addr == (CORE_ADDR) -1)
1502 {
1503 if (pc == stop_pc && breakpoint_here_p (pc)
1504 && execution_direction != EXEC_REVERSE)
1505 /* There is a breakpoint at the address we will resume at,
1506 step one instruction before inserting breakpoints so that
1507 we do not stop right away (and report a second hit at this
1508 breakpoint).
1509
1510 Note, we don't do this in reverse, because we won't
1511 actually be executing the breakpoint insn anyway.
1512 We'll be (un-)executing the previous instruction. */
1513
1514 oneproc = 1;
1515 else if (gdbarch_single_step_through_delay_p (gdbarch)
1516 && gdbarch_single_step_through_delay (gdbarch,
1517 get_current_frame ()))
1518 /* We stepped onto an instruction that needs to be stepped
1519 again before re-inserting the breakpoint, do so. */
1520 oneproc = 1;
1521 }
1522 else
1523 {
1524 regcache_write_pc (regcache, addr);
1525 }
1526
1527 if (debug_infrun)
1528 fprintf_unfiltered (gdb_stdlog,
1529 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1530 paddress (gdbarch, addr), siggnal, step);
1531
1532 if (non_stop)
1533 /* In non-stop, each thread is handled individually. The context
1534 must already be set to the right thread here. */
1535 ;
1536 else
1537 {
1538 /* In a multi-threaded task we may select another thread and
1539 then continue or step.
1540
1541 But if the old thread was stopped at a breakpoint, it will
1542 immediately cause another breakpoint stop without any
1543 execution (i.e. it will report a breakpoint hit incorrectly).
1544 So we must step over it first.
1545
1546 prepare_to_proceed checks the current thread against the
1547 thread that reported the most recent event. If a step-over
1548 is required it returns TRUE and sets the current thread to
1549 the old thread. */
1550 if (prepare_to_proceed (step))
1551 oneproc = 1;
1552 }
1553
1554 /* prepare_to_proceed may change the current thread. */
1555 tp = inferior_thread ();
1556
1557 if (oneproc)
1558 {
1559 tp->trap_expected = 1;
1560 /* If displaced stepping is enabled, we can step over the
1561 breakpoint without hitting it, so leave all breakpoints
1562 inserted. Otherwise we need to disable all breakpoints, step
1563 one instruction, and then re-add them when that step is
1564 finished. */
1565 if (!use_displaced_stepping (gdbarch))
1566 remove_breakpoints ();
1567 }
1568
1569 /* We can insert breakpoints if we're not trying to step over one,
1570 or if we are stepping over one but we're using displaced stepping
1571 to do so. */
1572 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1573 insert_breakpoints ();
1574
1575 if (!non_stop)
1576 {
1577 /* Pass the last stop signal to the thread we're resuming,
1578 irrespective of whether the current thread is the thread that
1579 got the last event or not. This was historically GDB's
1580 behaviour before keeping a stop_signal per thread. */
1581
1582 struct thread_info *last_thread;
1583 ptid_t last_ptid;
1584 struct target_waitstatus last_status;
1585
1586 get_last_target_status (&last_ptid, &last_status);
1587 if (!ptid_equal (inferior_ptid, last_ptid)
1588 && !ptid_equal (last_ptid, null_ptid)
1589 && !ptid_equal (last_ptid, minus_one_ptid))
1590 {
1591 last_thread = find_thread_ptid (last_ptid);
1592 if (last_thread)
1593 {
1594 tp->stop_signal = last_thread->stop_signal;
1595 last_thread->stop_signal = TARGET_SIGNAL_0;
1596 }
1597 }
1598 }
1599
1600 if (siggnal != TARGET_SIGNAL_DEFAULT)
1601 tp->stop_signal = siggnal;
1602 /* If this signal should not be seen by program,
1603 give it zero. Used for debugging signals. */
1604 else if (!signal_program[tp->stop_signal])
1605 tp->stop_signal = TARGET_SIGNAL_0;
1606
1607 annotate_starting ();
1608
1609 /* Make sure that output from GDB appears before output from the
1610 inferior. */
1611 gdb_flush (gdb_stdout);
1612
1613 /* Refresh prev_pc value just prior to resuming. This used to be
1614 done in stop_stepping; however, setting prev_pc there did not handle
1615 scenarios such as inferior function calls or returning from
1616 a function via the return command. In those cases, the prev_pc
1617 value was not set properly for subsequent commands. The prev_pc value
1618 is used to initialize the starting line number in the ecs. With an
1619 invalid value, the gdb next command ends up stopping at the position
1620 represented by the next line table entry past our start position.
1621 On platforms that generate one line table entry per line, this
1622 is not a problem. However, on the ia64, the compiler generates
1623 extraneous line table entries that do not increase the line number.
1624 When we issue the gdb next command on the ia64 after an inferior call
1625 or a return command, we often end up a few instructions forward, still
1626 within the original line we started.
1627
1628 An attempt was made to have init_execution_control_state () refresh
1629 the prev_pc value before calculating the line number. This approach
1630 did not work because on platforms that use ptrace, the pc register
1631 cannot be read unless the inferior is stopped. At that point, we
1632 are not guaranteed the inferior is stopped and so the regcache_read_pc ()
1633 call can fail. Setting the prev_pc value here ensures the value is
1634 updated correctly when the inferior is stopped. */
1635 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1636
1637 /* Fill in with reasonable starting values. */
1638 init_thread_stepping_state (tp);
1639
1640 /* Reset to normal state. */
1641 init_infwait_state ();
1642
1643 /* Resume inferior. */
1644 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1645
1646 /* Wait for it to stop (if not standalone)
1647 and in any case decode why it stopped, and act accordingly. */
1648 /* Do this only if we are not using the event loop, or if the target
1649 does not support asynchronous execution. */
1650 if (!target_can_async_p ())
1651 {
1652 wait_for_inferior (0);
1653 normal_stop ();
1654 }
1655 }
1656 \f
1657
1658 /* Start remote-debugging of a machine over a serial link. */
1659
1660 void
1661 start_remote (int from_tty)
1662 {
1663 struct inferior *inferior;
1664 init_wait_for_inferior ();
1665
1666 inferior = current_inferior ();
1667 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1668
1669 /* Always go on waiting for the target, regardless of the mode. */
1670 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1671 indicate to wait_for_inferior that a target should timeout if
1672 nothing is returned (instead of just blocking). Because of this,
1673 targets expecting an immediate response need to, internally, set
1674 things up so that the target_wait() is forced to eventually
1675 timeout. */
1676 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1677 differentiate to its caller what the state of the target is after
1678 the initial open has been performed. Here we're assuming that
1679 the target has stopped. It should be possible to eventually have
1680 target_open() return to the caller an indication that the target
1681 is currently running and GDB state should be set to the same as
1682 for an async run. */
1683 wait_for_inferior (0);
1684
1685 /* Now that the inferior has stopped, do any bookkeeping like
1686 loading shared libraries. We want to do this before normal_stop,
1687 so that the displayed frame is up to date. */
1688 post_create_inferior (&current_target, from_tty);
1689
1690 normal_stop ();
1691 }
1692
1693 /* Initialize static vars when a new inferior begins. */
1694
1695 void
1696 init_wait_for_inferior (void)
1697 {
1698 /* These are meaningless until the first time through wait_for_inferior. */
1699
1700 breakpoint_init_inferior (inf_starting);
1701
1702 clear_proceed_status ();
1703
1704 stepping_past_singlestep_breakpoint = 0;
1705 deferred_step_ptid = null_ptid;
1706
1707 target_last_wait_ptid = minus_one_ptid;
1708
1709 previous_inferior_ptid = null_ptid;
1710 init_infwait_state ();
1711
1712 displaced_step_clear ();
1713
1714 /* Discard any skipped inlined frames. */
1715 clear_inline_frame_state (minus_one_ptid);
1716 }
1717
1718 \f
1719 /* This enum encodes possible reasons for doing a target_wait, so that
1720 wfi can call target_wait in one place. (Ultimately the call will be
1721 moved out of the infinite loop entirely.) */
1722
1723 enum infwait_states
1724 {
1725 infwait_normal_state,
1726 infwait_thread_hop_state,
1727 infwait_step_watch_state,
1728 infwait_nonstep_watch_state
1729 };
1730
1731 /* Why did the inferior stop? Used to print the appropriate messages
1732 to the interface from within handle_inferior_event(). */
1733 enum inferior_stop_reason
1734 {
1735 /* Step, next, nexti, stepi finished. */
1736 END_STEPPING_RANGE,
1737 /* Inferior terminated by signal. */
1738 SIGNAL_EXITED,
1739 /* Inferior exited. */
1740 EXITED,
1741 /* Inferior received signal, and user asked to be notified. */
1742 SIGNAL_RECEIVED,
1743 /* Reverse execution -- target ran out of history info. */
1744 NO_HISTORY
1745 };
1746
1747 /* The PTID we'll do a target_wait on. */
1748 ptid_t waiton_ptid;
1749
1750 /* Current inferior wait state. */
1751 enum infwait_states infwait_state;
1752
1753 /* Data to be passed around while handling an event. This data is
1754 discarded between events. */
1755 struct execution_control_state
1756 {
1757 ptid_t ptid;
1758 /* The thread that got the event, if this was a thread event; NULL
1759 otherwise. */
1760 struct thread_info *event_thread;
1761
1762 struct target_waitstatus ws;
1763 int random_signal;
1764 CORE_ADDR stop_func_start;
1765 CORE_ADDR stop_func_end;
1766 char *stop_func_name;
1767 int new_thread_event;
1768 int wait_some_more;
1769 };
1770
1771 static void init_execution_control_state (struct execution_control_state *ecs);
1772
1773 static void handle_inferior_event (struct execution_control_state *ecs);
1774
1775 static void handle_step_into_function (struct gdbarch *gdbarch,
1776 struct execution_control_state *ecs);
1777 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
1778 struct execution_control_state *ecs);
1779 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
1780 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
1781 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
1782 struct symtab_and_line sr_sal,
1783 struct frame_id sr_id);
1784 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
1785
1786 static void stop_stepping (struct execution_control_state *ecs);
1787 static void prepare_to_wait (struct execution_control_state *ecs);
1788 static void keep_going (struct execution_control_state *ecs);
1789 static void print_stop_reason (enum inferior_stop_reason stop_reason,
1790 int stop_info);
1791
1792 /* Callback for iterate_over_threads. If the thread is stopped, but
1793 the user/frontend doesn't know about that yet, go through
1794 normal_stop, as if the thread had just stopped now. ARG points at
1795 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
1796 ptid_is_pid(PTID) is true, applies to all threads of the process
1797 pointed at by PTID. Otherwise, apply only to the thread pointed
1798 at by PTID. */
1799
1800 static int
1801 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
1802 {
1803 ptid_t ptid = * (ptid_t *) arg;
1804
1805 if ((ptid_equal (info->ptid, ptid)
1806 || ptid_equal (minus_one_ptid, ptid)
1807 || (ptid_is_pid (ptid)
1808 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
1809 && is_running (info->ptid)
1810 && !is_executing (info->ptid))
1811 {
1812 struct cleanup *old_chain;
1813 struct execution_control_state ecss;
1814 struct execution_control_state *ecs = &ecss;
1815
1816 memset (ecs, 0, sizeof (*ecs));
1817
1818 old_chain = make_cleanup_restore_current_thread ();
1819
1820 switch_to_thread (info->ptid);
1821
1822 /* Go through handle_inferior_event/normal_stop, so we always
1823 have consistent output as if the stop event had been
1824 reported. */
1825 ecs->ptid = info->ptid;
1826 ecs->event_thread = find_thread_ptid (info->ptid);
1827 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
1828 ecs->ws.value.sig = TARGET_SIGNAL_0;
1829
1830 handle_inferior_event (ecs);
1831
1832 if (!ecs->wait_some_more)
1833 {
1834 struct thread_info *tp;
1835
1836 normal_stop ();
1837
1838 /* Finish off the continuations. The continuations
1839 themselves are responsible for realising the thread
1840 didn't finish what it was supposed to do. */
1841 tp = inferior_thread ();
1842 do_all_intermediate_continuations_thread (tp);
1843 do_all_continuations_thread (tp);
1844 }
1845
1846 do_cleanups (old_chain);
1847 }
1848
1849 return 0;
1850 }
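
/* Editorial sketch (not part of infrun.c): the PTID-matching rule the
   callback above applies, written out over a hypothetical plain-struct
   ptid.  A filter pid of -1 plays the role of minus_one_ptid and
   matches every thread; a filter whose lwp is 0 stands in for
   ptid_is_pid and matches every thread of that process; otherwise only
   an exact match counts.  All names here are illustrative, not GDB
   API.  */

struct sketch_ptid
{
  int pid;   /* Process id; -1 means "any".  */
  long lwp;  /* Thread id; 0 means "whole process".  */
};

static int
sketch_ptid_matches_filter (struct sketch_ptid thread,
                            struct sketch_ptid filter)
{
  if (filter.pid == -1)
    return 1;                           /* Wildcard: applies to all threads.  */
  if (filter.lwp == 0)
    return thread.pid == filter.pid;    /* Process-wide filter.  */
  return (thread.pid == filter.pid      /* Exact thread match only.  */
          && thread.lwp == filter.lwp);
}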
1851
1852 /* This function is attached as a "thread_stop_requested" observer.
1853 Clean up local state that assumed the PTID was to be resumed, and
1854 report the stop to the frontend. */
1855
1856 static void
1857 infrun_thread_stop_requested (ptid_t ptid)
1858 {
1859 struct displaced_step_request *it, *next, *prev = NULL;
1860
1861 /* PTID was requested to stop. Remove it from the displaced
1862 stepping queue, so we don't try to resume it automatically. */
1863 for (it = displaced_step_request_queue; it; it = next)
1864 {
1865 next = it->next;
1866
1867 if (ptid_equal (it->ptid, ptid)
1868 || ptid_equal (minus_one_ptid, ptid)
1869 || (ptid_is_pid (ptid)
1870 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
1871 {
1872 if (displaced_step_request_queue == it)
1873 displaced_step_request_queue = it->next;
1874 else
1875 prev->next = it->next;
1876
1877 xfree (it);
1878 }
1879 else
1880 prev = it;
1881 }
1882
1883 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
1884 }
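
/* Editorial sketch (not part of infrun.c): the queue pruning performed
   above, reduced to a generic singly linked list.  Matching nodes are
   unlinked whether they sit at the head or in the middle, which is why
   the real code tracks a PREV pointer.  The node type, predicate, and
   release routine are hypothetical stand-ins.  */

struct sketch_node
{
  struct sketch_node *next;
  int key;
};

static void
sketch_remove_matching (struct sketch_node **headp,
                        int (*matches) (int key),
                        void (*release) (struct sketch_node *))
{
  struct sketch_node *it, *next, *prev = NULL;

  for (it = *headp; it != NULL; it = next)
    {
      next = it->next;

      if (matches (it->key))
        {
          if (prev == NULL)
            *headp = next;      /* Removing the head node.  */
          else
            prev->next = next;  /* Unlink from the middle.  */
          release (it);
        }
      else
        prev = it;
    }
}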
1885
1886 static void
1887 infrun_thread_thread_exit (struct thread_info *tp, int silent)
1888 {
1889 if (ptid_equal (target_last_wait_ptid, tp->ptid))
1890 nullify_last_target_wait_ptid ();
1891 }
1892
1893 /* Callback for iterate_over_threads. */
1894
1895 static int
1896 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
1897 {
1898 if (is_exited (info->ptid))
1899 return 0;
1900
1901 delete_step_resume_breakpoint (info);
1902 return 0;
1903 }
1904
1905 /* In all-stop, delete the step resume breakpoint of any thread that
1906 had one. In non-stop, delete the step resume breakpoint of the
1907 thread that just stopped. */
1908
1909 static void
1910 delete_step_thread_step_resume_breakpoint (void)
1911 {
1912 if (!target_has_execution
1913 || ptid_equal (inferior_ptid, null_ptid))
1914 /* If the inferior has exited, we have already deleted the step
1915 resume breakpoints out of GDB's lists. */
1916 return;
1917
1918 if (non_stop)
1919 {
1920 /* If in non-stop mode, only delete the step-resume or
1921 longjmp-resume breakpoint of the thread that just stopped
1922 stepping. */
1923 struct thread_info *tp = inferior_thread ();
1924 delete_step_resume_breakpoint (tp);
1925 }
1926 else
1927 /* In all-stop mode, delete all step-resume and longjmp-resume
1928 breakpoints of any thread that had them. */
1929 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
1930 }
1931
1932 /* A cleanup wrapper. */
1933
1934 static void
1935 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
1936 {
1937 delete_step_thread_step_resume_breakpoint ();
1938 }
1939
1940 /* Pretty print the results of target_wait, for debugging purposes. */
1941
1942 static void
1943 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
1944 const struct target_waitstatus *ws)
1945 {
1946 char *status_string = target_waitstatus_to_string (ws);
1947 struct ui_file *tmp_stream = mem_fileopen ();
1948 char *text;
1949
1950 /* The text is split over several lines because it was getting too long.
1951 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
1952 output as a unit; we want only one timestamp printed if debug_timestamp
1953 is set. */
1954
1955 fprintf_unfiltered (tmp_stream,
1956 "infrun: target_wait (%d", PIDGET (waiton_ptid));
1957 if (PIDGET (waiton_ptid) != -1)
1958 fprintf_unfiltered (tmp_stream,
1959 " [%s]", target_pid_to_str (waiton_ptid));
1960 fprintf_unfiltered (tmp_stream, ", status) =\n");
1961 fprintf_unfiltered (tmp_stream,
1962 "infrun: %d [%s],\n",
1963 PIDGET (result_ptid), target_pid_to_str (result_ptid));
1964 fprintf_unfiltered (tmp_stream,
1965 "infrun: %s\n",
1966 status_string);
1967
1968 text = ui_file_xstrdup (tmp_stream, NULL);
1969
1970 /* This uses %s in part to handle %'s in the text, but also to avoid
1971 a gcc error: the format attribute requires a string literal. */
1972 fprintf_unfiltered (gdb_stdlog, "%s", text);
1973
1974 xfree (status_string);
1975 xfree (text);
1976 ui_file_delete (tmp_stream);
1977 }
1978
1979 /* Wait for control to return from inferior to debugger.
1980
1981 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
1982 as if they were SIGTRAP signals. This can be useful during
1983 the startup sequence on some targets such as HP/UX, where
1984 we receive an EXEC event instead of the expected SIGTRAP.
1985
1986 If inferior gets a signal, we may decide to start it up again
1987 instead of returning. That is why there is a loop in this function.
1988 When this function actually returns it means the inferior
1989 should be left stopped and GDB should read more commands. */
1990
1991 void
1992 wait_for_inferior (int treat_exec_as_sigtrap)
1993 {
1994 struct cleanup *old_cleanups;
1995 struct execution_control_state ecss;
1996 struct execution_control_state *ecs;
1997
1998 if (debug_infrun)
1999 fprintf_unfiltered
2000 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2001 treat_exec_as_sigtrap);
2002
2003 old_cleanups =
2004 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2005
2006 ecs = &ecss;
2007 memset (ecs, 0, sizeof (*ecs));
2008
2009 /* We'll update this if & when we switch to a new thread. */
2010 previous_inferior_ptid = inferior_ptid;
2011
2012 while (1)
2013 {
2014 struct cleanup *old_chain;
2015
2016 /* We have to invalidate the registers BEFORE calling target_wait
2017 because they can be loaded from the target while in target_wait.
2018 This makes remote debugging a bit more efficient for those
2019 targets that provide critical registers as part of their normal
2020 status mechanism. */
2021
2022 overlay_cache_invalid = 1;
2023 registers_changed ();
2024
2025 if (deprecated_target_wait_hook)
2026 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2027 else
2028 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2029
2030 if (debug_infrun)
2031 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2032
2033 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2034 {
2035 xfree (ecs->ws.value.execd_pathname);
2036 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2037 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2038 }
2039
2040 /* If an error happens while handling the event, propagate GDB's
2041 knowledge of the executing state to the frontend/user running
2042 state. */
2043 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2044
2045 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2046 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2047 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2048
2049 /* Now figure out what to do with the result of the wait. */
2050 handle_inferior_event (ecs);
2051
2052 /* No error, don't finish the state yet. */
2053 discard_cleanups (old_chain);
2054
2055 if (!ecs->wait_some_more)
2056 break;
2057 }
2058
2059 do_cleanups (old_cleanups);
2060 }
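
/* Editorial sketch (not part of infrun.c): the shape of the
   synchronous loop above.  WAIT and HANDLE are hypothetical stand-ins
   for target_wait and handle_inferior_event; HANDLE returns nonzero
   while it wants another event, mirroring ecs->wait_some_more.  */

struct sketch_event;            /* Opaque stand-in for a waitstatus.  */

static void
sketch_wait_for_inferior (struct sketch_event *(*wait) (void),
                          int (*handle) (struct sketch_event *))
{
  for (;;)
    {
      /* The real loop invalidates cached registers before waiting,
         since they may be refetched from the target during the wait.  */
      struct sketch_event *ev = wait ();

      if (!handle (ev))         /* Zero: stop and report to the user.  */
        break;
    }
}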
2061
2062 /* Asynchronous version of wait_for_inferior. It is called by the
2063 event loop whenever a change of state is detected on the file
2064 descriptor corresponding to the target. It can be called more than
2065 once to complete a single execution command. In such cases we need
2066 to keep the state in a global variable ECSS. If it is the last time
2067 that this function is called for a single execution command, then
2068 report to the user that the inferior has stopped, and do the
2069 necessary cleanups. */
2070
2071 void
2072 fetch_inferior_event (void *client_data)
2073 {
2074 struct execution_control_state ecss;
2075 struct execution_control_state *ecs = &ecss;
2076 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2077 struct cleanup *ts_old_chain;
2078 int was_sync = sync_execution;
2079
2080 memset (ecs, 0, sizeof (*ecs));
2081
2082 /* We'll update this if & when we switch to a new thread. */
2083 previous_inferior_ptid = inferior_ptid;
2084
2085 if (non_stop)
2086 /* In non-stop mode, the user/frontend should not notice a thread
2087 switch due to internal events. Make sure we revert to the
2088 user-selected thread and frame after handling the event and
2089 running any breakpoint commands. */
2090 make_cleanup_restore_current_thread ();
2091
2092 /* We have to invalidate the registers BEFORE calling target_wait
2093 because they can be loaded from the target while in target_wait.
2094 This makes remote debugging a bit more efficient for those
2095 targets that provide critical registers as part of their normal
2096 status mechanism. */
2097
2098 overlay_cache_invalid = 1;
2099 registers_changed ();
2100
2101 if (deprecated_target_wait_hook)
2102 ecs->ptid =
2103 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2104 else
2105 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2106
2107 if (debug_infrun)
2108 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2109
2110 if (non_stop
2111 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2112 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2113 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2114 /* In non-stop mode, each thread is handled individually. Switch
2115 early, so the global state is set correctly for this
2116 thread. */
2117 context_switch (ecs->ptid);
2118
2119 /* If an error happens while handling the event, propagate GDB's
2120 knowledge of the executing state to the frontend/user running
2121 state. */
2122 if (!non_stop)
2123 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2124 else
2125 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2126
2127 /* Now figure out what to do with the result of the wait. */
2128 handle_inferior_event (ecs);
2129
2130 if (!ecs->wait_some_more)
2131 {
2132 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2133
2134 delete_step_thread_step_resume_breakpoint ();
2135
2136 /* We may not find an inferior if this was a process exit. */
2137 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2138 normal_stop ();
2139
2140 if (target_has_execution
2141 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2142 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2143 && ecs->event_thread->step_multi
2144 && ecs->event_thread->stop_step)
2145 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2146 else
2147 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2148 }
2149
2150 /* No error, don't finish the thread states yet. */
2151 discard_cleanups (ts_old_chain);
2152
2153 /* Revert thread and frame. */
2154 do_cleanups (old_chain);
2155
2156 /* If the inferior was in sync execution mode, and now isn't,
2157 restore the prompt. */
2158 if (was_sync && !sync_execution)
2159 display_gdb_prompt (0);
2160 }
2161
2162 /* Record the frame and location we're currently stepping through. */
2163 void
2164 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2165 {
2166 struct thread_info *tp = inferior_thread ();
2167
2168 tp->step_frame_id = get_frame_id (frame);
2169 tp->step_stack_frame_id = get_stack_frame_id (frame);
2170
2171 tp->current_symtab = sal.symtab;
2172 tp->current_line = sal.line;
2173 }
2174
2175 /* Prepare an execution control state for a
2176 wait_for_inferior-type loop. */
2177
2178 static void
2179 init_execution_control_state (struct execution_control_state *ecs)
2180 {
2181 ecs->random_signal = 0;
2182 }
2183
2184 /* Clear context switchable stepping state. */
2185
2186 void
2187 init_thread_stepping_state (struct thread_info *tss)
2188 {
2189 tss->stepping_over_breakpoint = 0;
2190 tss->step_after_step_resume_breakpoint = 0;
2191 tss->stepping_through_solib_after_catch = 0;
2192 tss->stepping_through_solib_catchpoints = NULL;
2193 }
2194
2195 /* Return the cached copy of the last pid/waitstatus returned by
2196 target_wait()/deprecated_target_wait_hook(). The data is actually
2197 cached by handle_inferior_event(), which gets called immediately
2198 after target_wait()/deprecated_target_wait_hook(). */
2199
2200 void
2201 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2202 {
2203 *ptidp = target_last_wait_ptid;
2204 *status = target_last_waitstatus;
2205 }
2206
2207 void
2208 nullify_last_target_wait_ptid (void)
2209 {
2210 target_last_wait_ptid = minus_one_ptid;
2211 }
2212
2213 /* Switch thread contexts. */
2214
2215 static void
2216 context_switch (ptid_t ptid)
2217 {
2218 if (debug_infrun)
2219 {
2220 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2221 target_pid_to_str (inferior_ptid));
2222 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2223 target_pid_to_str (ptid));
2224 }
2225
2226 switch_to_thread (ptid);
2227 }
2228
2229 static void
2230 adjust_pc_after_break (struct execution_control_state *ecs)
2231 {
2232 struct regcache *regcache;
2233 struct gdbarch *gdbarch;
2234 CORE_ADDR breakpoint_pc;
2235
2236 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2237 we aren't, just return.
2238
2239 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2240 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2241 implemented by software breakpoints should be handled through the normal
2242 breakpoint layer.
2243
2244 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2245 different signals (SIGILL or SIGEMT for instance), but it is less
2246 clear where the PC is pointing afterwards. It may not match
2247 gdbarch_decr_pc_after_break. I don't know any specific target that
2248 generates these signals at breakpoints (the code has been in GDB since at
2249 least 1992) so I can not guess how to handle them here.
2250
2251 In earlier versions of GDB, a target with
2252 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2253 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2254 target with both of these set in GDB history, and it seems unlikely to be
2255 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2256
2257 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2258 return;
2259
2260 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2261 return;
2262
2263 /* In reverse execution, when a breakpoint is hit, the instruction
2264 under it has already been de-executed. The reported PC always
2265 points at the breakpoint address, so adjusting it further would
2266 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2267 architecture:
2268
2269 B1 0x08000000 : INSN1
2270 B2 0x08000001 : INSN2
2271 0x08000002 : INSN3
2272 PC -> 0x08000003 : INSN4
2273
2274 Say you're stopped at 0x08000003 as above. Reverse continuing
2275 from that point should hit B2 as below. Reading the PC when the
2276 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2277 been de-executed already.
2278
2279 B1 0x08000000 : INSN1
2280 B2 PC -> 0x08000001 : INSN2
2281 0x08000002 : INSN3
2282 0x08000003 : INSN4
2283
2284 We can't apply the same logic as for forward execution, because
2285 we would wrongly adjust the PC to 0x08000000, since there's a
2286 breakpoint at PC - 1. We'd then report a hit on B1, although
2287 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2288 behaviour. */
2289 if (execution_direction == EXEC_REVERSE)
2290 return;
2291
2292 /* If this target does not decrement the PC after breakpoints, then
2293 we have nothing to do. */
2294 regcache = get_thread_regcache (ecs->ptid);
2295 gdbarch = get_regcache_arch (regcache);
2296 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2297 return;
2298
2299 /* Find the location where (if we've hit a breakpoint) the
2300 breakpoint would be. */
2301 breakpoint_pc = regcache_read_pc (regcache)
2302 - gdbarch_decr_pc_after_break (gdbarch);
2303
2304 /* Check whether there actually is a software breakpoint inserted at
2305 that location.
2306
2307 If in non-stop mode, a race condition is possible where we've
2308 removed a breakpoint, but stop events for that breakpoint were
2309 already queued and arrive later. To suppress those spurious
2310 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2311 and retire them after a number of stop events are reported. */
2312 if (software_breakpoint_inserted_here_p (breakpoint_pc)
2313 || (non_stop && moribund_breakpoint_here_p (breakpoint_pc)))
2314 {
2315 struct cleanup *old_cleanups = NULL;
2316 if (RECORD_IS_USED)
2317 old_cleanups = record_gdb_operation_disable_set ();
2318
2319 /* When using hardware single-step, a SIGTRAP is reported for both
2320 a completed single-step and a software breakpoint. Need to
2321 differentiate between the two, as the latter needs adjusting
2322 but the former does not.
2323
2324 The SIGTRAP can be due to a completed hardware single-step only if
2325 - we didn't insert software single-step breakpoints
2326 - the thread to be examined is still the current thread
2327 - this thread is currently being stepped
2328
2329 If any of these conditions does not hold, we must have stopped due
2330 to hitting a software breakpoint, and have to back up to the
2331 breakpoint address.
2332
2333 As a special case, we could have hardware single-stepped a
2334 software breakpoint. In this case (prev_pc == breakpoint_pc),
2335 we also need to back up to the breakpoint address. */
2336
2337 if (singlestep_breakpoints_inserted_p
2338 || !ptid_equal (ecs->ptid, inferior_ptid)
2339 || !currently_stepping (ecs->event_thread)
2340 || ecs->event_thread->prev_pc == breakpoint_pc)
2341 regcache_write_pc (regcache, breakpoint_pc);
2342
2343 if (RECORD_IS_USED)
2344 do_cleanups (old_cleanups);
2345 }
2346 }
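
/* Editorial sketch (not part of infrun.c): the core of the adjustment
   above in isolation.  On a target whose breakpoint trap leaves the PC
   DECR bytes past the breakpoint instruction, the candidate breakpoint
   address is simply REPORTED_PC - DECR, and the PC is rewound only if
   a software breakpoint is actually planted there.  BP_INSERTED_P is a
   hypothetical stand-in for software_breakpoint_inserted_here_p; the
   real code must also rule out the "completed hardware single-step"
   case before rewinding.  */

static unsigned long
sketch_adjust_pc_after_break (unsigned long reported_pc,
                              unsigned long decr,
                              int (*bp_inserted_p) (unsigned long addr))
{
  unsigned long breakpoint_pc;

  if (decr == 0)
    return reported_pc;         /* This target doesn't move the PC.  */

  breakpoint_pc = reported_pc - decr;
  if (bp_inserted_p (breakpoint_pc))
    return breakpoint_pc;       /* Rewind onto the breakpoint address.  */

  return reported_pc;           /* The SIGTRAP came from something else.  */
}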
2347
2348 void
2349 init_infwait_state (void)
2350 {
2351 waiton_ptid = pid_to_ptid (-1);
2352 infwait_state = infwait_normal_state;
2353 }
2354
2355 void
2356 error_is_running (void)
2357 {
2358 error (_("\
2359 Cannot execute this command while the selected thread is running."));
2360 }
2361
2362 void
2363 ensure_not_running (void)
2364 {
2365 if (is_running (inferior_ptid))
2366 error_is_running ();
2367 }
2368
2369 static int
2370 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2371 {
2372 for (frame = get_prev_frame (frame);
2373 frame != NULL;
2374 frame = get_prev_frame (frame))
2375 {
2376 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2377 return 1;
2378 if (get_frame_type (frame) != INLINE_FRAME)
2379 break;
2380 }
2381
2382 return 0;
2383 }
2384
2385 /* Auxiliary function that handles syscall entry/return events.
2386 It returns 1 if the inferior should keep going (and GDB
2387 should ignore the event), or 0 if the event deserves to be
2388 processed. */
2389 static int
2390 deal_with_syscall_event (struct execution_control_state *ecs)
2391 {
2392 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2393 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2394 int syscall_number = gdbarch_get_syscall_number (gdbarch,
2395 ecs->ptid);
2396 target_last_waitstatus.value.syscall_number = syscall_number;
2397
2398 if (catch_syscall_enabled () > 0
2399 && catching_syscall_number (syscall_number) > 0)
2400 {
2401 if (debug_infrun)
2402 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2403 syscall_number);
2404 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2405
2406 if (!ptid_equal (ecs->ptid, inferior_ptid))
2407 {
2408 context_switch (ecs->ptid);
2409 reinit_frame_cache ();
2410 }
2411
2412 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2413
2414 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2415
2416 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2417
2418 /* If no catchpoint triggered for this, then keep going. */
2419 if (ecs->random_signal)
2420 {
2421 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2422 keep_going (ecs);
2423 return 1;
2424 }
2425 return 0;
2426 }
2427 else
2428 {
2429 resume (0, TARGET_SIGNAL_0);
2430 prepare_to_wait (ecs);
2431 return 1;
2432 }
2433 }
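
/* Editorial sketch (not part of infrun.c): the ignore-or-process
   decision made above as a pure predicate.  CATCHING_THIS_SYSCALL
   stands in for "catch_syscall_enabled () and this syscall number is
   being caught"; CATCHPOINT_TRIGGERED stands in for the bpstat check.
   A nonzero result means the event is uninteresting and the inferior
   is simply resumed.  */

static int
sketch_ignore_syscall_event (int catching_this_syscall,
                             int catchpoint_triggered)
{
  if (!catching_this_syscall)
    return 1;                   /* No one asked about this syscall.  */

  return !catchpoint_triggered; /* Asked, but no catchpoint fired here.  */
}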
2434
2435 /* Given an execution control state that has been freshly filled in
2436 by an event from the inferior, figure out what it means and take
2437 appropriate action. */
2438
2439 static void
2440 handle_inferior_event (struct execution_control_state *ecs)
2441 {
2442 struct frame_info *frame;
2443 struct gdbarch *gdbarch;
2444 int sw_single_step_trap_p = 0;
2445 int stopped_by_watchpoint;
2446 int stepped_after_stopped_by_watchpoint = 0;
2447 struct symtab_and_line stop_pc_sal;
2448 enum stop_kind stop_soon;
2449
2450 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2451 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2452 && ecs->ws.kind != TARGET_WAITKIND_IGNORE)
2453 {
2454 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2455 gdb_assert (inf);
2456 stop_soon = inf->stop_soon;
2457 }
2458 else
2459 stop_soon = NO_STOP_QUIETLY;
2460
2461 /* Cache the last pid/waitstatus. */
2462 target_last_wait_ptid = ecs->ptid;
2463 target_last_waitstatus = ecs->ws;
2464
2465 /* Always clear state belonging to the previous time we stopped. */
2466 stop_stack_dummy = 0;
2467
2468 /* If it's a new process, add it to the thread database. */
2469
2470 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2471 && !ptid_equal (ecs->ptid, minus_one_ptid)
2472 && !in_thread_list (ecs->ptid));
2473
2474 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2475 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2476 add_thread (ecs->ptid);
2477
2478 ecs->event_thread = find_thread_ptid (ecs->ptid);
2479
2480 /* Dependent on valid ECS->EVENT_THREAD. */
2481 adjust_pc_after_break (ecs);
2482
2483 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2484 reinit_frame_cache ();
2485
2486 if (ecs->ws.kind != TARGET_WAITKIND_IGNORE)
2487 {
2488 breakpoint_retire_moribund ();
2489
2490 /* Mark the non-executing threads accordingly. In all-stop, all
2491 threads of all processes are stopped when we get any event
2492 reported. In non-stop mode, only the event thread stops. If
2493 we're handling a process exit in non-stop mode, there's
2494 nothing to do, as threads of the dead process are gone, and
2495 threads of any other process were left running. */
2496 if (!non_stop)
2497 set_executing (minus_one_ptid, 0);
2498 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2499 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2500 set_executing (inferior_ptid, 0);
2501 }
2502
2503 switch (infwait_state)
2504 {
2505 case infwait_thread_hop_state:
2506 if (debug_infrun)
2507 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2508 break;
2509
2510 case infwait_normal_state:
2511 if (debug_infrun)
2512 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2513 break;
2514
2515 case infwait_step_watch_state:
2516 if (debug_infrun)
2517 fprintf_unfiltered (gdb_stdlog,
2518 "infrun: infwait_step_watch_state\n");
2519
2520 stepped_after_stopped_by_watchpoint = 1;
2521 break;
2522
2523 case infwait_nonstep_watch_state:
2524 if (debug_infrun)
2525 fprintf_unfiltered (gdb_stdlog,
2526 "infrun: infwait_nonstep_watch_state\n");
2527 insert_breakpoints ();
2528
2529 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2530 handle things like signals arriving and other things happening
2531 in combination correctly? */
2532 stepped_after_stopped_by_watchpoint = 1;
2533 break;
2534
2535 default:
2536 internal_error (__FILE__, __LINE__, _("bad switch"));
2537 }
2538
2539 infwait_state = infwait_normal_state;
2540 waiton_ptid = pid_to_ptid (-1);
2541
2542 switch (ecs->ws.kind)
2543 {
2544 case TARGET_WAITKIND_LOADED:
2545 if (debug_infrun)
2546 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2547 /* Ignore gracefully during startup of the inferior, as it might
2548 be the shell which has just loaded some objects; otherwise
2549 add the symbols for the newly loaded objects. Also ignore at
2550 the beginning of an attach or remote session; we will query
2551 the full list of libraries once the connection is
2552 established. */
2553 if (stop_soon == NO_STOP_QUIETLY)
2554 {
2555 /* Check for any newly added shared libraries if we're
2556 supposed to be adding them automatically. Switch
2557 terminal for any messages produced by
2558 breakpoint_re_set. */
2559 target_terminal_ours_for_output ();
2560 /* NOTE: cagney/2003-11-25: Make certain that the target
2561 stack's section table is kept up-to-date. Architectures
2562 (e.g., PPC64) use the section table to perform
2563 operations such as address => section name and hence
2564 require the table to contain all sections (including
2565 those found in shared libraries). */
2566 #ifdef SOLIB_ADD
2567 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2568 #else
2569 solib_add (NULL, 0, &current_target, auto_solib_add);
2570 #endif
2571 target_terminal_inferior ();
2572
2573 /* If requested, stop when the dynamic linker notifies
2574 gdb of events. This allows the user to get control
2575 and place breakpoints in initializer routines for
2576 dynamically loaded objects (among other things). */
2577 if (stop_on_solib_events)
2578 {
2579 stop_stepping (ecs);
2580 return;
2581 }
2582
2583 /* NOTE drow/2007-05-11: This might be a good place to check
2584 for "catch load". */
2585 }
2586
2587 /* If we are skipping through a shell, or through shared library
2588 loading that we aren't interested in, resume the program. If
2589 we're running the program normally, also resume. But stop if
2590 we're attaching or setting up a remote connection. */
2591 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2592 {
2593 /* Loading of shared libraries might have changed breakpoint
2594 addresses. Make sure new breakpoints are inserted. */
2595 if (stop_soon == NO_STOP_QUIETLY
2596 && !breakpoints_always_inserted_mode ())
2597 insert_breakpoints ();
2598 resume (0, TARGET_SIGNAL_0);
2599 prepare_to_wait (ecs);
2600 return;
2601 }
2602
2603 break;
2604
2605 case TARGET_WAITKIND_SPURIOUS:
2606 if (debug_infrun)
2607 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2608 resume (0, TARGET_SIGNAL_0);
2609 prepare_to_wait (ecs);
2610 return;
2611
2612 case TARGET_WAITKIND_EXITED:
2613 if (debug_infrun)
2614 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2615 inferior_ptid = ecs->ptid;
2616 target_terminal_ours (); /* Must do this before mourn anyway */
2617 print_stop_reason (EXITED, ecs->ws.value.integer);
2618
2619 /* Record the exit code in the convenience variable $_exitcode, so
2620 that the user can inspect this again later. */
2621 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2622 (LONGEST) ecs->ws.value.integer);
2623 gdb_flush (gdb_stdout);
2624 target_mourn_inferior ();
2625 singlestep_breakpoints_inserted_p = 0;
2626 stop_print_frame = 0;
2627 stop_stepping (ecs);
2628 return;
2629
2630 case TARGET_WAITKIND_SIGNALLED:
2631 if (debug_infrun)
2632 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2633 inferior_ptid = ecs->ptid;
2634 stop_print_frame = 0;
2635 target_terminal_ours (); /* Must do this before mourn anyway */
2636
2637 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2638 reach here unless the inferior is dead. However, for years
2639 target_kill() was called here, which hints that fatal signals aren't
2640 really fatal on some systems. If that's true, then some changes
2641 may be needed. */
2642 target_mourn_inferior ();
2643
2644 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2645 singlestep_breakpoints_inserted_p = 0;
2646 stop_stepping (ecs);
2647 return;
2648
2649 /* The following are the only cases in which we keep going;
2650 the above cases end in a continue or goto. */
2651 case TARGET_WAITKIND_FORKED:
2652 case TARGET_WAITKIND_VFORKED:
2653 if (debug_infrun)
2654 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2655
2656 if (!ptid_equal (ecs->ptid, inferior_ptid))
2657 {
2658 context_switch (ecs->ptid);
2659 reinit_frame_cache ();
2660 }
2661
2662 /* Immediately detach breakpoints from the child before there's
2663 any chance of letting the user delete breakpoints from the
2664 breakpoint lists. If we don't do this early, it's easy to
2665 leave leftover traps in the child, viz.: "break foo; catch
2666 fork; c; <fork>; del; c; <child calls foo>". We only follow
2667 the fork on the last `continue', and by that time the
2668 breakpoint at "foo" is long gone from the breakpoint table.
2669 If we vforked, then we don't need to unpatch here, since both
2670 parent and child are sharing the same memory pages; we'll
2671 need to unpatch at follow/detach time instead to be certain
2672 that new breakpoints added between catchpoint hit time and
2673 vfork follow are detached. */
2674 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2675 {
2676 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2677
2678 /* This won't actually modify the breakpoint list, but will
2679 physically remove the breakpoints from the child. */
2680 detach_breakpoints (child_pid);
2681 }
2682
2683 /* In case the event is caught by a catchpoint, remember that
2684 the event is to be followed at the next resume of the thread,
2685 and not immediately. */
2686 ecs->event_thread->pending_follow = ecs->ws;
2687
2688 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2689
2690 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2691
2692 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2693
2694 /* If no catchpoint triggered for this, then keep going. */
2695 if (ecs->random_signal)
2696 {
2697 int should_resume;
2698
2699 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2700
2701 should_resume = follow_fork ();
2702
2703 ecs->event_thread = inferior_thread ();
2704 ecs->ptid = inferior_ptid;
2705
2706 if (should_resume)
2707 keep_going (ecs);
2708 else
2709 stop_stepping (ecs);
2710 return;
2711 }
2712 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2713 goto process_event_stop_test;
2714
2715 case TARGET_WAITKIND_EXECD:
2716 if (debug_infrun)
2717 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
2718
2719 if (!ptid_equal (ecs->ptid, inferior_ptid))
2720 {
2721 context_switch (ecs->ptid);
2722 reinit_frame_cache ();
2723 }
2724
2725 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2726
2727 /* This causes the eventpoints and symbol table to be reset.
2728 Must do this now, before trying to determine whether to
2729 stop. */
2730 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
2731
2732 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2733 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2734
2735 /* Note that this may be referenced from inside
2736 bpstat_stop_status above, through inferior_has_execd. */
2737 xfree (ecs->ws.value.execd_pathname);
2738 ecs->ws.value.execd_pathname = NULL;
2739
2740 /* If no catchpoint triggered for this, then keep going. */
2741 if (ecs->random_signal)
2742 {
2743 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2744 keep_going (ecs);
2745 return;
2746 }
2747 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2748 goto process_event_stop_test;
2749
2750 /* Be careful not to try to gather much state about a thread
2751 that's in a syscall. It's frequently a losing proposition. */
2752 case TARGET_WAITKIND_SYSCALL_ENTRY:
2753 if (debug_infrun)
2754 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
2755 /* Get the current syscall number. */
2756 if (deal_with_syscall_event (ecs) != 0)
2757 return;
2758 goto process_event_stop_test;
2759 break;
2760
2761 /* Before examining the threads further, step this thread to
2762 get it entirely out of the syscall. (We get notice of the
2763 event when the thread is just on the verge of exiting a
2764 syscall. Stepping one instruction seems to get it back
2765 into user code.) */
2766 case TARGET_WAITKIND_SYSCALL_RETURN:
2767 if (debug_infrun)
2768 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
2769 if (deal_with_syscall_event (ecs) != 0)
2770 return;
2771 goto process_event_stop_test;
2772 break;
2773
2774 case TARGET_WAITKIND_STOPPED:
2775 if (debug_infrun)
2776 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
2777 ecs->event_thread->stop_signal = ecs->ws.value.sig;
2778 break;
2779
2780 case TARGET_WAITKIND_NO_HISTORY:
2781 /* Reverse execution: target ran out of history info. */
2782 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2783 print_stop_reason (NO_HISTORY, 0);
2784 stop_stepping (ecs);
2785 return;
2786
2787 /* We had an event in the inferior, but we are not interested
2788 in handling it at this level. The lower layers have already
2789 done what needs to be done, if anything.
2790
2791 One of the possible circumstances for this is when the
2792 inferior produces output for the console. The inferior has
2793 not stopped, and we are ignoring the event. Another possible
2794 circumstance is any event which the lower level knows will be
2795 reported multiple times without an intervening resume. */
2796 case TARGET_WAITKIND_IGNORE:
2797 if (debug_infrun)
2798 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2799 prepare_to_wait (ecs);
2800 return;
2801 }
2802
2803 if (ecs->new_thread_event)
2804 {
2805 if (non_stop)
2806 /* Non-stop assumes that the target handles adding new threads
2807 to the thread list. */
2808 internal_error (__FILE__, __LINE__, "\
2809 targets should add new threads to the thread list themselves in non-stop mode.");
2810
2811 /* We may want to consider not doing a resume here in order to
2812 give the user a chance to play with the new thread. It might
2813 be good to make that a user-settable option. */
2814
2815 /* At this point, all threads are stopped (happens automatically
2816 in either the OS or the native code). Therefore we need to
2817 continue all threads in order to make progress. */
2818
2819 if (!ptid_equal (ecs->ptid, inferior_ptid))
2820 context_switch (ecs->ptid);
2821 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
2822 prepare_to_wait (ecs);
2823 return;
2824 }
2825
2826 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
2827 {
2828 /* Do we need to clean up the state of a thread that has
2829 completed a displaced single-step? (Doing so usually affects
2830 the PC, so do it here, before we set stop_pc.) */
2831 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
2832
2833 /* If we either finished a single-step or hit a breakpoint, but
2834 the user wanted this thread to be stopped, pretend we got a
2835 SIG0 (generic unsignaled stop). */
2836
2837 if (ecs->event_thread->stop_requested
2838 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2839 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2840 }
2841
2842 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2843
2844 if (debug_infrun)
2845 {
2846 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2847 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2848
2849 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
2850 paddress (gdbarch, stop_pc));
2851 if (target_stopped_by_watchpoint ())
2852 {
2853 CORE_ADDR addr;
2854 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
2855
2856 if (target_stopped_data_address (&current_target, &addr))
2857 fprintf_unfiltered (gdb_stdlog,
2858 "infrun: stopped data address = %s\n",
2859 paddress (gdbarch, addr));
2860 else
2861 fprintf_unfiltered (gdb_stdlog,
2862 "infrun: (no data address available)\n");
2863 }
2864 }
2865
2866 if (stepping_past_singlestep_breakpoint)
2867 {
2868 gdb_assert (singlestep_breakpoints_inserted_p);
2869 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
2870 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
2871
2872 stepping_past_singlestep_breakpoint = 0;
2873
2874 /* We've either finished single-stepping past the single-step
2875 breakpoint, or stopped for some other reason. It would be nice if
2876 we could tell, but we can't reliably. */
2877 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2878 {
2879 if (debug_infrun)
2880 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
2881 /* Pull the single step breakpoints out of the target. */
2882 remove_single_step_breakpoints ();
2883 singlestep_breakpoints_inserted_p = 0;
2884
2885 ecs->random_signal = 0;
2886 ecs->event_thread->trap_expected = 0;
2887
2888 context_switch (saved_singlestep_ptid);
2889 if (deprecated_context_hook)
2890 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
2891
2892 resume (1, TARGET_SIGNAL_0);
2893 prepare_to_wait (ecs);
2894 return;
2895 }
2896 }
2897
2898 if (!ptid_equal (deferred_step_ptid, null_ptid))
2899 {
2900 /* In non-stop mode, there's never a deferred_step_ptid set. */
2901 gdb_assert (!non_stop);
2902
2903 /* If we stopped for some other reason than single-stepping, ignore
2904 the fact that we were supposed to switch back. */
2905 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2906 {
2907 if (debug_infrun)
2908 fprintf_unfiltered (gdb_stdlog,
2909 "infrun: handling deferred step\n");
2910
2911 /* Pull the single step breakpoints out of the target. */
2912 if (singlestep_breakpoints_inserted_p)
2913 {
2914 remove_single_step_breakpoints ();
2915 singlestep_breakpoints_inserted_p = 0;
2916 }
2917
2918 /* Note: We do not call context_switch at this point, as the
2919 context is already set up for stepping the original thread. */
2920 switch_to_thread (deferred_step_ptid);
2921 deferred_step_ptid = null_ptid;
2922 /* Suppress spurious "Switching to ..." message. */
2923 previous_inferior_ptid = inferior_ptid;
2924
2925 resume (1, TARGET_SIGNAL_0);
2926 prepare_to_wait (ecs);
2927 return;
2928 }
2929
2930 deferred_step_ptid = null_ptid;
2931 }
2932
2933 /* See if a thread hit a thread-specific breakpoint that was meant for
2934 another thread. If so, then step that thread past the breakpoint,
2935 and continue it. */
2936
2937 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2938 {
2939 int thread_hop_needed = 0;
2940
2941 /* Check if a regular breakpoint has been hit before checking
2942 for a potential single step breakpoint. Otherwise, GDB will
2943 not see this breakpoint hit when stepping onto breakpoints. */
2944 if (regular_breakpoint_inserted_here_p (stop_pc))
2945 {
2946 ecs->random_signal = 0;
2947 if (!breakpoint_thread_match (stop_pc, ecs->ptid))
2948 thread_hop_needed = 1;
2949 }
2950 else if (singlestep_breakpoints_inserted_p)
2951 {
2952 /* We have not context switched yet, so this should be true
2953 no matter which thread hit the singlestep breakpoint. */
2954 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
2955 if (debug_infrun)
2956 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
2957 "trap for %s\n",
2958 target_pid_to_str (ecs->ptid));
2959
2960 ecs->random_signal = 0;
2961 /* The call to in_thread_list is necessary because PTIDs sometimes
2962 change when we go from single-threaded to multi-threaded. If
2963 the singlestep_ptid is still in the list, assume that it is
2964 really different from ecs->ptid. */
2965 if (!ptid_equal (singlestep_ptid, ecs->ptid)
2966 && in_thread_list (singlestep_ptid))
2967 {
2968 /* If the PC of the thread we were trying to single-step
2969 has changed, discard this event (which we were going
2970 to ignore anyway), and pretend we saw that thread
2971 trap. This prevents us continuously moving the
2972 single-step breakpoint forward, one instruction at a
2973 time. If the PC has changed, then the thread we were
2974 trying to single-step has trapped or been signalled,
2975 but the event has not been reported to GDB yet.
2976
2977 There might be some cases where this loses signal
2978 information, if a signal has arrived at exactly the
2979 same time that the PC changed, but this is the best
2980 we can do with the information available. Perhaps we
2981 should arrange to report all events for all threads
2982 when they stop, or to re-poll the remote looking for
2983 this particular thread (i.e. temporarily enable
2984 schedlock). */
2985
2986 CORE_ADDR new_singlestep_pc
2987 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
2988
2989 if (new_singlestep_pc != singlestep_pc)
2990 {
2991 enum target_signal stop_signal;
2992
2993 if (debug_infrun)
2994 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
2995 " but expected thread advanced also\n");
2996
2997 /* The current context still belongs to
2998 singlestep_ptid. Don't swap here, since that's
2999 the context we want to use. Just fudge our
3000 state and continue. */
3001 stop_signal = ecs->event_thread->stop_signal;
3002 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3003 ecs->ptid = singlestep_ptid;
3004 ecs->event_thread = find_thread_ptid (ecs->ptid);
3005 ecs->event_thread->stop_signal = stop_signal;
3006 stop_pc = new_singlestep_pc;
3007 }
3008 else
3009 {
3010 if (debug_infrun)
3011 fprintf_unfiltered (gdb_stdlog,
3012 "infrun: unexpected thread\n");
3013
3014 thread_hop_needed = 1;
3015 stepping_past_singlestep_breakpoint = 1;
3016 saved_singlestep_ptid = singlestep_ptid;
3017 }
3018 }
3019 }
3020
3021 if (thread_hop_needed)
3022 {
3023 struct regcache *thread_regcache;
3024 int remove_status = 0;
3025
3026 if (debug_infrun)
3027 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3028
3029 /* Switch context before touching inferior memory, the
3030 previous thread may have exited. */
3031 if (!ptid_equal (inferior_ptid, ecs->ptid))
3032 context_switch (ecs->ptid);
3033
3034 /* Saw a breakpoint, but it was hit by the wrong thread.
3035 Just continue. */
3036
3037 if (singlestep_breakpoints_inserted_p)
3038 {
3039 /* Pull the single step breakpoints out of the target. */
3040 remove_single_step_breakpoints ();
3041 singlestep_breakpoints_inserted_p = 0;
3042 }
3043
3044 /* If the arch can displace step, don't remove the
3045 breakpoints. */
3046 thread_regcache = get_thread_regcache (ecs->ptid);
3047 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3048 remove_status = remove_breakpoints ();
3049
3050 /* Did we fail to remove breakpoints? If so, try
3051 to set the PC past the bp. (There's at least
3052 one situation in which we can fail to remove
3053 the bp's: On HP-UX's that use ttrace, we can't
3054 change the address space of a vforking child
3055 process until the child exits (well, okay, not
3056 then either :-) or execs. */
3057 if (remove_status != 0)
3058 error (_("Cannot step over breakpoint hit in wrong thread"));
3059 else
3060 { /* Single step */
3061 if (!non_stop)
3062 {
3063 /* Only need to require the next event from this
3064 thread in all-stop mode. */
3065 waiton_ptid = ecs->ptid;
3066 infwait_state = infwait_thread_hop_state;
3067 }
3068
3069 ecs->event_thread->stepping_over_breakpoint = 1;
3070 keep_going (ecs);
3071 return;
3072 }
3073 }
3074 else if (singlestep_breakpoints_inserted_p)
3075 {
3076 sw_single_step_trap_p = 1;
3077 ecs->random_signal = 0;
3078 }
3079 }
3080 else
3081 ecs->random_signal = 1;
3082
3083 /* See if something interesting happened to the non-current thread. If
3084 so, then switch to that thread. */
3085 if (!ptid_equal (ecs->ptid, inferior_ptid))
3086 {
3087 if (debug_infrun)
3088 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3089
3090 context_switch (ecs->ptid);
3091
3092 if (deprecated_context_hook)
3093 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3094 }
3095
3096 /* At this point, get hold of the now-current thread's frame. */
3097 frame = get_current_frame ();
3098 gdbarch = get_frame_arch (frame);
3099
3100 if (singlestep_breakpoints_inserted_p)
3101 {
3102 /* Pull the single step breakpoints out of the target. */
3103 remove_single_step_breakpoints ();
3104 singlestep_breakpoints_inserted_p = 0;
3105 }
3106
3107 if (stepped_after_stopped_by_watchpoint)
3108 stopped_by_watchpoint = 0;
3109 else
3110 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3111
3112 /* If necessary, step over this watchpoint. We'll be back to display
3113 it in a moment. */
3114 if (stopped_by_watchpoint
3115 && (target_have_steppable_watchpoint
3116 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3117 {
3118 /* At this point, we are stopped at an instruction which has
3119 attempted to write to a piece of memory under control of
3120 a watchpoint. The instruction hasn't actually executed
3121 yet. If we were to evaluate the watchpoint expression
3122 now, we would get the old value, and therefore no change
3123 would seem to have occurred.
3124
3125 In order to make watchpoints work `right', we really need
3126 to complete the memory write, and then evaluate the
3127 watchpoint expression. We do this by single-stepping the
3128 target.
3129
3130 It may not be necessary to disable the watchpoint to step over
3131 it. For example, the PA can (with some kernel cooperation)
3132 single step over a watchpoint without disabling the watchpoint.
3133
3134 It is far more common to need to disable a watchpoint to step
3135 the inferior over it. If we have non-steppable watchpoints,
3136 we must disable the current watchpoint; it's simplest to
3137 disable all watchpoints and breakpoints. */
3138 int hw_step = 1;
3139
3140 if (!target_have_steppable_watchpoint)
3141 remove_breakpoints ();
3142 /* Single step */
3143 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3144 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3145 waiton_ptid = ecs->ptid;
3146 if (target_have_steppable_watchpoint)
3147 infwait_state = infwait_step_watch_state;
3148 else
3149 infwait_state = infwait_nonstep_watch_state;
3150 prepare_to_wait (ecs);
3151 return;
3152 }
3153
3154 ecs->stop_func_start = 0;
3155 ecs->stop_func_end = 0;
3156 ecs->stop_func_name = 0;
3157 /* Don't care about return value; stop_func_start and stop_func_name
3158 will both be 0 if it doesn't work. */
3159 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3160 &ecs->stop_func_start, &ecs->stop_func_end);
3161 ecs->stop_func_start
3162 += gdbarch_deprecated_function_start_offset (gdbarch);
3163 ecs->event_thread->stepping_over_breakpoint = 0;
3164 bpstat_clear (&ecs->event_thread->stop_bpstat);
3165 ecs->event_thread->stop_step = 0;
3166 stop_print_frame = 1;
3167 ecs->random_signal = 0;
3168 stopped_by_random_signal = 0;
3169
3170 /* Hide inlined functions starting here, unless we just performed stepi or
3171 nexti. After stepi and nexti, always show the innermost frame (not any
3172 inline function call sites). */
3173 if (ecs->event_thread->step_range_end != 1)
3174 skip_inline_frames (ecs->ptid);
3175
3176 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3177 && ecs->event_thread->trap_expected
3178 && gdbarch_single_step_through_delay_p (gdbarch)
3179 && currently_stepping (ecs->event_thread))
3180 {
3181 /* We're trying to step off a breakpoint. Turns out that we're
3182 also on an instruction that needs to be stepped multiple
3183 times before it has been fully executed. E.g., architectures
3184 with a delay slot. It needs to be stepped twice, once for
3185 the instruction and once for the delay slot. */
3186 int step_through_delay
3187 = gdbarch_single_step_through_delay (gdbarch, frame);
3188 if (debug_infrun && step_through_delay)
3189 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3190 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3191 {
3192 /* The user issued a continue when stopped at a breakpoint.
3193 Set up for another trap and get out of here. */
3194 ecs->event_thread->stepping_over_breakpoint = 1;
3195 keep_going (ecs);
3196 return;
3197 }
3198 else if (step_through_delay)
3199 {
3200 /* The user issued a step when stopped at a breakpoint.
3201 Maybe we should stop, maybe we should not - the delay
3202 slot *might* correspond to a line of source. In any
3203 case, don't decide that here, just set
3204 ecs->event_thread->stepping_over_breakpoint, making sure we
3205 single-step again before breakpoints are re-inserted. */
3206 ecs->event_thread->stepping_over_breakpoint = 1;
3207 }
3208 }
3209
3210 /* Look at the cause of the stop, and decide what to do.
3211 The alternatives are:
3212 1) stop_stepping and return; to really stop and return to the debugger,
3213 2) keep_going and return to start up again
3214 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3215 3) set ecs->random_signal to 1, and the decision between 1 and 2
3216 will be made according to the signal handling tables. */
3217
3218 /* First, distinguish signals caused by the debugger from signals
3219 that have to do with the program's own actions. Note that
3220 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3221 on the operating system version. Here we detect when a SIGILL or
3222 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3223 something similar for SIGSEGV, since a SIGSEGV will be generated
3224 when we're trying to execute a breakpoint instruction on a
3225 non-executable stack. This happens for call dummy breakpoints
3226 for architectures like SPARC that place call dummies on the
3227 stack.
3228
3229 If we're doing a displaced step past a breakpoint, then the
3230 breakpoint is always inserted at the original instruction;
3231 non-standard signals can't be explained by the breakpoint. */
3232 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3233 || (! ecs->event_thread->trap_expected
3234 && breakpoint_inserted_here_p (stop_pc)
3235 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_ILL
3236 || ecs->event_thread->stop_signal == TARGET_SIGNAL_SEGV
3237 || ecs->event_thread->stop_signal == TARGET_SIGNAL_EMT))
3238 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3239 || stop_soon == STOP_QUIETLY_REMOTE)
3240 {
3241 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3242 {
3243 if (debug_infrun)
3244 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3245 stop_print_frame = 0;
3246 stop_stepping (ecs);
3247 return;
3248 }
3249
3250 /* This originates from start_remote(), start_inferior() and
3251 shared libraries hook functions. */
3252 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3253 {
3254 if (debug_infrun)
3255 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3256 stop_stepping (ecs);
3257 return;
3258 }
3259
3260 /* This originates from attach_command(). We need to overwrite
3261 the stop_signal here, because some kernels don't ignore a
3262 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3263 See more comments in inferior.h. On the other hand, if we
3264 get a non-SIGSTOP, report it to the user - assume the backend
3265 will handle the SIGSTOP if it should show up later.
3266
3267 Also consider that the attach is complete when we see a
3268 SIGTRAP. Some systems (e.g. Windows) and stubs supporting
3269 target extended-remote (e.g. gdbserver) report it instead
3270 of a SIGSTOP. We already rely on SIGTRAP being our
3271 signal, so this is no exception.
3272
3273 Also consider that the attach is complete when we see a
3274 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3275 the target to stop all threads of the inferior, in case the
3276 low level attach operation doesn't stop them implicitly. If
3277 they weren't stopped implicitly, then the stub will report a
3278 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3279 other than GDB's request. */
3280 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3281 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3282 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3283 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3284 {
3285 stop_stepping (ecs);
3286 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3287 return;
3288 }
3289
3290 /* See if there is a breakpoint at the current PC. */
3291 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
3292
3293 /* The following is needed in case a breakpoint condition
3294 called a function. */
3295 stop_print_frame = 1;
3296
3297 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3298 at one stage in the past included checks for an inferior
3299 function call's call dummy's return breakpoint. The original
3300 comment, that went with the test, read:
3301
3302 ``End of a stack dummy. Some systems (e.g. Sony news) give
3303 another signal besides SIGTRAP, so check here as well as
3304 above.''
3305
3306 If someone ever tries to get call dummies on a
3307 non-executable stack to work (where the target would stop
3308 with something like a SIGSEGV), then those tests might need
3309 to be re-instated. Given, however, that the tests were only
3310 enabled when momentary breakpoints were not being used, I
3311 suspect that it won't be the case.
3312
3313 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3314 be necessary for call dummies on a non-executable stack on
3315 SPARC. */
3316
3317 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3318 ecs->random_signal
3319 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3320 || ecs->event_thread->trap_expected
3321 || (ecs->event_thread->step_range_end
3322 && ecs->event_thread->step_resume_breakpoint == NULL));
3323 else
3324 {
3325 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3326 if (!ecs->random_signal)
3327 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3328 }
3329 }
3330
3331 /* When we reach this point, we've pretty much decided
3332 that the reason for stopping must've been a random
3333 (unexpected) signal. */
3334
3335 else
3336 ecs->random_signal = 1;
3337
3338 process_event_stop_test:
3339
3340 /* Re-fetch current thread's frame in case we did a
3341 "goto process_event_stop_test" above. */
3342 frame = get_current_frame ();
3343 gdbarch = get_frame_arch (frame);
3344
3345 /* For the program's own signals, act according to
3346 the signal handling tables. */
3347
3348 if (ecs->random_signal)
3349 {
3350 /* Signal not for debugging purposes. */
3351 int printed = 0;
3352
3353 if (debug_infrun)
3354 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3355 ecs->event_thread->stop_signal);
3356
3357 stopped_by_random_signal = 1;
3358
3359 if (signal_print[ecs->event_thread->stop_signal])
3360 {
3361 printed = 1;
3362 target_terminal_ours_for_output ();
3363 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3364 }
3365 /* Always stop on signals if we're either just gaining control
3366 of the program, or the user explicitly requested this thread
3367 to remain stopped. */
3368 if (stop_soon != NO_STOP_QUIETLY
3369 || ecs->event_thread->stop_requested
3370 || signal_stop_state (ecs->event_thread->stop_signal))
3371 {
3372 stop_stepping (ecs);
3373 return;
3374 }
3375 /* If not going to stop, give terminal back
3376 if we took it away. */
3377 else if (printed)
3378 target_terminal_inferior ();
3379
3380 /* Clear the signal if it should not be passed. */
3381 if (signal_program[ecs->event_thread->stop_signal] == 0)
3382 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
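/* The signal tables consulted above reflect the user's "handle"
   settings; e.g. after "handle SIGUSR1 nostop noprint pass", a
   SIGUSR1 arriving here is neither announced nor stopped for, and is
   passed on to the inferior when execution resumes.  */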
3383
3384 if (ecs->event_thread->prev_pc == stop_pc
3385 && ecs->event_thread->trap_expected
3386 && ecs->event_thread->step_resume_breakpoint == NULL)
3387 {
3388 /* We were just starting a new sequence, attempting to
3389 single-step off of a breakpoint and expecting a SIGTRAP.
3390 Instead this signal arrives. This signal will take us out
3391 of the stepping range so GDB needs to remember to, when
3392 the signal handler returns, resume stepping off that
3393 breakpoint. */
3394 /* To simplify things, "continue" is forced to use the same
3395 code paths as single-step - set a breakpoint at the
3396 signal return address and then, once hit, step off that
3397 breakpoint. */
3398 if (debug_infrun)
3399 fprintf_unfiltered (gdb_stdlog,
3400 "infrun: signal arrived while stepping over "
3401 "breakpoint\n");
3402
3403 insert_step_resume_breakpoint_at_frame (frame);
3404 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3405 keep_going (ecs);
3406 return;
3407 }
3408
3409 if (ecs->event_thread->step_range_end != 0
3410 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3411 && (ecs->event_thread->step_range_start <= stop_pc
3412 && stop_pc < ecs->event_thread->step_range_end)
3413 && frame_id_eq (get_stack_frame_id (frame),
3414 ecs->event_thread->step_stack_frame_id)
3415 && ecs->event_thread->step_resume_breakpoint == NULL)
3416 {
3417 /* The inferior is about to take a signal that will take it
3418 out of the single step range. Set a breakpoint at the
3419 current PC (which is presumably where the signal handler
3420 will eventually return) and then allow the inferior to
3421 run free.
3422
3423 Note that this is only needed for a signal delivered
3424 while in the single-step range. Nested signals aren't a
3425 problem as they eventually all return. */
3426 if (debug_infrun)
3427 fprintf_unfiltered (gdb_stdlog,
3428 "infrun: signal may take us out of "
3429 "single-step range\n");
3430
3431 insert_step_resume_breakpoint_at_frame (frame);
3432 keep_going (ecs);
3433 return;
3434 }
3435
3436 /* Note: step_resume_breakpoint may be non-NULL. This occurs
3437 when either there's a nested signal, or when there's a
3438 pending signal enabled just as the signal handler returns
3439 (leaving the inferior at the step-resume-breakpoint without
3440 actually executing it). Either way continue until the
3441 breakpoint is really hit. */
3442 keep_going (ecs);
3443 return;
3444 }
3445
3446 /* Handle cases caused by hitting a breakpoint. */
3447 {
3448 CORE_ADDR jmp_buf_pc;
3449 struct bpstat_what what;
3450
3451 what = bpstat_what (ecs->event_thread->stop_bpstat);
3452
3453 if (what.call_dummy)
3454 {
3455 stop_stack_dummy = 1;
3456 }
3457
3458 switch (what.main_action)
3459 {
3460 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3461 /* If we hit the breakpoint at longjmp while stepping, we
3462 install a momentary breakpoint at the target of the
3463 jmp_buf. */
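/* Concretely, this is what lets e.g. a "next" over a call that
   eventually calls longjmp land at the longjmp target instead of
   running away: the bp_longjmp breakpoint fires, we plant a
   longjmp-resume breakpoint at the PC recorded in the jmp_buf, and
   resume until it is hit.  */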
3464
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog,
3467 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3468
3469 ecs->event_thread->stepping_over_breakpoint = 1;
3470
3471 if (!gdbarch_get_longjmp_target_p (gdbarch)
3472 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3473 {
3474 if (debug_infrun)
3475 fprintf_unfiltered (gdb_stdlog, "\
3476 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3477 keep_going (ecs);
3478 return;
3479 }
3480
3481 /* We're going to replace the current step-resume breakpoint
3482 with a longjmp-resume breakpoint. */
3483 delete_step_resume_breakpoint (ecs->event_thread);
3484
3485 /* Insert a breakpoint at resume address. */
3486 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3487
3488 keep_going (ecs);
3489 return;
3490
3491 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3492 if (debug_infrun)
3493 fprintf_unfiltered (gdb_stdlog,
3494 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3495
3496 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3497 delete_step_resume_breakpoint (ecs->event_thread);
3498
3499 ecs->event_thread->stop_step = 1;
3500 print_stop_reason (END_STEPPING_RANGE, 0);
3501 stop_stepping (ecs);
3502 return;
3503
3504 case BPSTAT_WHAT_SINGLE:
3505 if (debug_infrun)
3506 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3507 ecs->event_thread->stepping_over_breakpoint = 1;
3508 /* Still need to check other stuff, at least the case
3509 where we are stepping and step out of the stepping range. */
3510 break;
3511
3512 case BPSTAT_WHAT_STOP_NOISY:
3513 if (debug_infrun)
3514 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3515 stop_print_frame = 1;
3516
3517 /* We are about to nuke the step_resume_breakpoint via the
3518 cleanup chain, so no need to worry about it here. */
3519
3520 stop_stepping (ecs);
3521 return;
3522
3523 case BPSTAT_WHAT_STOP_SILENT:
3524 if (debug_infrun)
3525 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3526 stop_print_frame = 0;
3527
3528 /* We are about to nuke the step_resume_breakpoint via the
3529 cleanup chain, so no need to worry about it here. */
3530
3531 stop_stepping (ecs);
3532 return;
3533
3534 case BPSTAT_WHAT_STEP_RESUME:
3535 if (debug_infrun)
3536 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3537
3538 delete_step_resume_breakpoint (ecs->event_thread);
3539 if (ecs->event_thread->step_after_step_resume_breakpoint)
3540 {
3541 /* Back when the step-resume breakpoint was inserted, we
3542 were trying to single-step off a breakpoint. Go back
3543 to doing that. */
3544 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3545 ecs->event_thread->stepping_over_breakpoint = 1;
3546 keep_going (ecs);
3547 return;
3548 }
3549 if (stop_pc == ecs->stop_func_start
3550 && execution_direction == EXEC_REVERSE)
3551 {
3552 /* We are stepping over a function call in reverse, and
3553 just hit the step-resume breakpoint at the start
3554 address of the function. Go back to single-stepping,
3555 which should take us back to the function call. */
3556 ecs->event_thread->stepping_over_breakpoint = 1;
3557 keep_going (ecs);
3558 return;
3559 }
3560 break;
3561
3562 case BPSTAT_WHAT_CHECK_SHLIBS:
3563 {
3564 if (debug_infrun)
3565 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3566
3567 /* Check for any newly added shared libraries if we're
3568 supposed to be adding them automatically. Switch
3569 terminal for any messages produced by
3570 breakpoint_re_set. */
3571 target_terminal_ours_for_output ();
3572 /* NOTE: cagney/2003-11-25: Make certain that the target
3573 stack's section table is kept up-to-date. Architectures
3574 (e.g., PPC64) use the section table to perform
3575 operations such as address => section name and hence
3576 require the table to contain all sections (including
3577 those found in shared libraries). */
3578 #ifdef SOLIB_ADD
3579 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3580 #else
3581 solib_add (NULL, 0, &current_target, auto_solib_add);
3582 #endif
3583 target_terminal_inferior ();
3584
3585 /* If requested, stop when the dynamic linker notifies
3586 gdb of events. This allows the user to get control
3587 and place breakpoints in initializer routines for
3588 dynamically loaded objects (among other things). */
3589 if (stop_on_solib_events || stop_stack_dummy)
3590 {
3591 stop_stepping (ecs);
3592 return;
3593 }
3594 else
3595 {
3596 /* We want to step over this breakpoint, then keep going. */
3597 ecs->event_thread->stepping_over_breakpoint = 1;
3598 break;
3599 }
3600 }
3601 break;
3602
3603 case BPSTAT_WHAT_CHECK_JIT:
3604 if (debug_infrun)
3605 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_JIT\n");
3606
3607 /* Switch terminal for any messages produced by breakpoint_re_set. */
3608 target_terminal_ours_for_output ();
3609
3610 jit_event_handler (gdbarch);
3611
3612 target_terminal_inferior ();
3613
3614 /* We want to step over this breakpoint, then keep going. */
3615 ecs->event_thread->stepping_over_breakpoint = 1;
3616
3617 break;
3618
3619 case BPSTAT_WHAT_LAST:
3620 /* Not a real code, but listed here to shut up gcc -Wall. */
3621
3622 case BPSTAT_WHAT_KEEP_CHECKING:
3623 break;
3624 }
3625 }
3626
3627 /* We come here if we hit a breakpoint but should not
3628 stop for it. Possibly we also were stepping
3629 and should stop for that. So fall through and
3630 test for stepping. But, if not stepping,
3631 do not stop. */
3632
3633 /* In all-stop mode, if we're currently stepping but have stopped in
3634 some other thread, we need to switch back to the stepped thread. */
3635 if (!non_stop)
3636 {
3637 struct thread_info *tp;
3638 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3639 ecs->event_thread);
3640 if (tp)
3641 {
3642 /* However, if the current thread is blocked on some internal
3643 breakpoint, and we simply need to step over that breakpoint
3644 to get it going again, do that first. */
3645 if ((ecs->event_thread->trap_expected
3646 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3647 || ecs->event_thread->stepping_over_breakpoint)
3648 {
3649 keep_going (ecs);
3650 return;
3651 }
3652
3653 /* If the stepping thread exited, then don't try to switch
3654 back and resume it, which could fail in several different
3655 ways depending on the target. Instead, just keep going.
3656
3657 We can find a stepping dead thread in the thread list in
3658 two cases:
3659
3660 - The target supports thread exit events, and when the
3661 target tries to delete the thread from the thread list,
3662 inferior_ptid pointed at the exiting thread. In such
3663 case, calling delete_thread does not really remove the
3664 thread from the list; instead, the thread is left listed,
3665 with 'exited' state.
3666
3667 - The target's debug interface does not support thread
3668 exit events, and so we have no idea whatsoever if the
3669 previously stepping thread is still alive. For that
3670 reason, we need to synchronously query the target
3671 now. */
3672 if (is_exited (tp->ptid)
3673 || !target_thread_alive (tp->ptid))
3674 {
3675 if (debug_infrun)
3676 fprintf_unfiltered (gdb_stdlog, "\
3677 infrun: not switching back to stepped thread, it has vanished\n");
3678
3679 delete_thread (tp->ptid);
3680 keep_going (ecs);
3681 return;
3682 }
3683
3684 /* Otherwise, we no longer expect a trap in the current thread.
3685 Clear the trap_expected flag before switching back -- this is
3686 what keep_going would do as well, if we called it. */
3687 ecs->event_thread->trap_expected = 0;
3688
3689 if (debug_infrun)
3690 fprintf_unfiltered (gdb_stdlog,
3691 "infrun: switching back to stepped thread\n");
3692
3693 ecs->event_thread = tp;
3694 ecs->ptid = tp->ptid;
3695 context_switch (ecs->ptid);
3696 keep_going (ecs);
3697 return;
3698 }
3699 }
3700
3701 /* Are we stepping to get the inferior out of the dynamic linker's
3702 hook (and possibly the dld itself) after catching a shlib
3703 event? */
3704 if (ecs->event_thread->stepping_through_solib_after_catch)
3705 {
3706 #if defined(SOLIB_ADD)
3707 /* Have we reached our destination? If not, keep going. */
3708 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
3709 {
3710 if (debug_infrun)
3711 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
3712 ecs->event_thread->stepping_over_breakpoint = 1;
3713 keep_going (ecs);
3714 return;
3715 }
3716 #endif
3717 if (debug_infrun)
3718 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
3719 /* Else, stop and report the catchpoint(s) whose triggering
3720 caused us to begin stepping. */
3721 ecs->event_thread->stepping_through_solib_after_catch = 0;
3722 bpstat_clear (&ecs->event_thread->stop_bpstat);
3723 ecs->event_thread->stop_bpstat
3724 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
3725 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
3726 stop_print_frame = 1;
3727 stop_stepping (ecs);
3728 return;
3729 }
3730
3731 if (ecs->event_thread->step_resume_breakpoint)
3732 {
3733 if (debug_infrun)
3734 fprintf_unfiltered (gdb_stdlog,
3735 "infrun: step-resume breakpoint is inserted\n");
3736
3737 /* Having a step-resume breakpoint overrides anything
3738 else having to do with stepping commands until
3739 that breakpoint is reached. */
3740 keep_going (ecs);
3741 return;
3742 }
3743
3744 if (ecs->event_thread->step_range_end == 0)
3745 {
3746 if (debug_infrun)
3747 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
3748 /* Likewise if we aren't even stepping. */
3749 keep_going (ecs);
3750 return;
3751 }
3752
3753 /* If stepping through a line, keep going if still within it.
3754
3755 Note that step_range_end is the address of the first instruction
3756 beyond the step range, and NOT the address of the last instruction
3757 within it!
3758
3759 Note also that during reverse execution, we may be stepping
3760 through a function epilogue and therefore must detect when
3761 the current-frame changes in the middle of a line. */
3762
3763 if (stop_pc >= ecs->event_thread->step_range_start
3764 && stop_pc < ecs->event_thread->step_range_end
3765 && (execution_direction != EXEC_REVERSE
3766 || frame_id_eq (get_frame_id (frame),
3767 ecs->event_thread->step_frame_id)))
3768 {
3769 if (debug_infrun)
3770 fprintf_unfiltered
3771 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
3772 paddress (gdbarch, ecs->event_thread->step_range_start),
3773 paddress (gdbarch, ecs->event_thread->step_range_end));
3774
3775 /* When stepping backward, stop at beginning of line range
3776 (unless it's the function entry point, in which case
3777 keep going back to the call point). */
3778 if (stop_pc == ecs->event_thread->step_range_start
3779 && stop_pc != ecs->stop_func_start
3780 && execution_direction == EXEC_REVERSE)
3781 {
3782 ecs->event_thread->stop_step = 1;
3783 print_stop_reason (END_STEPPING_RANGE, 0);
3784 stop_stepping (ecs);
3785 }
3786 else
3787 keep_going (ecs);
3788
3789 return;
3790 }
3791
3792 /* We stepped out of the stepping range. */
3793
3794 /* If we are stepping at the source level and entered the runtime
3795 loader dynamic symbol resolution code...
3796
3797 EXEC_FORWARD: we keep on single stepping until we exit the run
3798 time loader code and reach the callee's address.
3799
3800 EXEC_REVERSE: we've already executed the callee (backward), and
3801 the runtime loader code is handled just like any other
3802 undebuggable function call. Now we need only keep stepping
3803 backward through the trampoline code, and that's handled further
3804 down, so there is nothing for us to do here. */
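/* A typical case is the dynamic linker's lazy-binding stub reached
   through the PLT (e.g. _dl_runtime_resolve on GNU/Linux): stepping
   into a not-yet-resolved call first lands in the resolver, and
   gdbarch_skip_solib_resolver below gives us an address at which to
   plant a step-resume breakpoint so that we come out in the real
   callee once resolution is done.  */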
3805
3806 if (execution_direction != EXEC_REVERSE
3807 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3808 && in_solib_dynsym_resolve_code (stop_pc))
3809 {
3810 CORE_ADDR pc_after_resolver =
3811 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
3812
3813 if (debug_infrun)
3814 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
3815
3816 if (pc_after_resolver)
3817 {
3818 /* Set up a step-resume breakpoint at the address
3819 indicated by SKIP_SOLIB_RESOLVER. */
3820 struct symtab_and_line sr_sal;
3821 init_sal (&sr_sal);
3822 sr_sal.pc = pc_after_resolver;
3823
3824 insert_step_resume_breakpoint_at_sal (gdbarch,
3825 sr_sal, null_frame_id);
3826 }
3827
3828 keep_going (ecs);
3829 return;
3830 }
3831
3832 if (ecs->event_thread->step_range_end != 1
3833 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3834 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
3835 && get_frame_type (frame) == SIGTRAMP_FRAME)
3836 {
3837 if (debug_infrun)
3838 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
3839 /* The inferior, while doing a "step" or "next", has ended up in
3840 a signal trampoline (either by a signal being delivered or by
3841 the signal handler returning). Just single-step until the
3842 inferior leaves the trampoline (either by calling the handler
3843 or returning). */
3844 keep_going (ecs);
3845 return;
3846 }
3847
3848 /* Check for subroutine calls. The check for the current frame
3849 equalling the step ID is not necessary - the check of the
3850 previous frame's ID is sufficient - but it is a common case and
3851 cheaper than checking the previous frame's ID.
3852
3853 NOTE: frame_id_eq will never report two invalid frame IDs as
3854 being equal, so to get into this block, both the current and
3855 previous frame must have valid frame IDs. */
3856 /* The outer_frame_id check is a heuristic to detect stepping
3857 through startup code. If we step over an instruction which
3858 sets the stack pointer from an invalid value to a valid value,
3859 we may detect that as a subroutine call from the mythical
3860 "outermost" function. This could be fixed by marking
3861 outermost frames as !stack_p,code_p,special_p. Then the
3862 initial outermost frame, before sp was valid, would
3863 have code_addr == &_start. See the comment in frame_id_eq
3864 for more. */
3865 if (!frame_id_eq (get_stack_frame_id (frame),
3866 ecs->event_thread->step_stack_frame_id)
3867 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
3868 ecs->event_thread->step_stack_frame_id)
3869 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
3870 outer_frame_id)
3871 || step_start_function != find_pc_function (stop_pc))))
3872 {
3873 CORE_ADDR real_stop_pc;
3874
3875 if (debug_infrun)
3876 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
3877
3878 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
3879 || ((ecs->event_thread->step_range_end == 1)
3880 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
3881 ecs->stop_func_start)))
3882 {
3883 /* I presume that step_over_calls is only 0 when we're
3884 supposed to be stepping at the assembly language level
3885 ("stepi"). Just stop. */
3886 /* Also, maybe we just did a "nexti" inside a prolog, so we
3887 thought it was a subroutine call but it was not. Stop as
3888 well. FENN */
3889 /* And this works the same backward as frontward. MVS */
3890 ecs->event_thread->stop_step = 1;
3891 print_stop_reason (END_STEPPING_RANGE, 0);
3892 stop_stepping (ecs);
3893 return;
3894 }
3895
3896 /* Reverse stepping through solib trampolines. */
3897
3898 if (execution_direction == EXEC_REVERSE
3899 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
3900 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
3901 || (ecs->stop_func_start == 0
3902 && in_solib_dynsym_resolve_code (stop_pc))))
3903 {
3904 /* Any solib trampoline code can be handled in reverse
3905 by simply continuing to single-step. We have already
3906 executed the solib function (backwards), and a few
3907 steps will take us back through the trampoline to the
3908 caller. */
3909 keep_going (ecs);
3910 return;
3911 }
3912
3913 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
3914 {
3915 /* We're doing a "next".
3916
3917 Normal (forward) execution: set a breakpoint at the
3918 callee's return address (the address at which the caller
3919 will resume).
3920
3921 Reverse (backward) execution: set the step-resume
3922 breakpoint at the start of the function that we just
3923 stepped into (backwards), and continue to there. When we
3924 get there, we'll need to single-step back to the caller. */
3925
3926 if (execution_direction == EXEC_REVERSE)
3927 {
3928 struct symtab_and_line sr_sal;
3929
3930 /* Normal function call return (static or dynamic). */
3931 init_sal (&sr_sal);
3932 sr_sal.pc = ecs->stop_func_start;
3933 insert_step_resume_breakpoint_at_sal (gdbarch,
3934 sr_sal, null_frame_id);
3935 }
3936 else
3937 insert_step_resume_breakpoint_at_caller (frame);
3938
3939 keep_going (ecs);
3940 return;
3941 }
3942
3943 /* If we are in a function call trampoline (a stub between the
3944 calling routine and the real function), locate the real
3945 function. That's what tells us (a) whether we want to step
3946 into it at all, and (b) what prologue we want to run to the
3947 end of, if we do step into it. */
3948 real_stop_pc = skip_language_trampoline (frame, stop_pc);
3949 if (real_stop_pc == 0)
3950 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
3951 if (real_stop_pc != 0)
3952 ecs->stop_func_start = real_stop_pc;
3953
3954 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
3955 {
3956 struct symtab_and_line sr_sal;
3957 init_sal (&sr_sal);
3958 sr_sal.pc = ecs->stop_func_start;
3959
3960 insert_step_resume_breakpoint_at_sal (gdbarch,
3961 sr_sal, null_frame_id);
3962 keep_going (ecs);
3963 return;
3964 }
3965
3966 /* If we have line number information for the function we are
3967 thinking of stepping into, step into it.
3968
3969 If there are several symtabs at that PC (e.g. with include
3970 files), just want to know whether *any* of them have line
3971 numbers. find_pc_line handles this. */
3972 {
3973 struct symtab_and_line tmp_sal;
3974
3975 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
3976 if (tmp_sal.line != 0)
3977 {
3978 if (execution_direction == EXEC_REVERSE)
3979 handle_step_into_function_backward (gdbarch, ecs);
3980 else
3981 handle_step_into_function (gdbarch, ecs);
3982 return;
3983 }
3984 }
3985
3986 /* If we have no line number and the step-stop-if-no-debug is
3987 set, we stop the step so that the user has a chance to switch
3988 to assembly mode. */
3989 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3990 && step_stop_if_no_debug)
3991 {
3992 ecs->event_thread->stop_step = 1;
3993 print_stop_reason (END_STEPPING_RANGE, 0);
3994 stop_stepping (ecs);
3995 return;
3996 }
3997
3998 if (execution_direction == EXEC_REVERSE)
3999 {
4000 /* Set a breakpoint at callee's start address.
4001 From there we can step once and be back in the caller. */
4002 struct symtab_and_line sr_sal;
4003 init_sal (&sr_sal);
4004 sr_sal.pc = ecs->stop_func_start;
4005 insert_step_resume_breakpoint_at_sal (gdbarch,
4006 sr_sal, null_frame_id);
4007 }
4008 else
4009 /* Set a breakpoint at callee's return address (the address
4010 at which the caller will resume). */
4011 insert_step_resume_breakpoint_at_caller (frame);
4012
4013 keep_going (ecs);
4014 return;
4015 }
4016
4017 /* Reverse stepping through solib trampolines. */
4018
4019 if (execution_direction == EXEC_REVERSE
4020 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4021 {
4022 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4023 || (ecs->stop_func_start == 0
4024 && in_solib_dynsym_resolve_code (stop_pc)))
4025 {
4026 /* Any solib trampoline code can be handled in reverse
4027 by simply continuing to single-step. We have already
4028 executed the solib function (backwards), and a few
4029 steps will take us back through the trampoline to the
4030 caller. */
4031 keep_going (ecs);
4032 return;
4033 }
4034 else if (in_solib_dynsym_resolve_code (stop_pc))
4035 {
4036 /* Stepped backward into the solib dynsym resolver.
4037 Set a breakpoint at its start and continue, then
4038 one more step will take us out. */
4039 struct symtab_and_line sr_sal;
4040 init_sal (&sr_sal);
4041 sr_sal.pc = ecs->stop_func_start;
4042 insert_step_resume_breakpoint_at_sal (gdbarch,
4043 sr_sal, null_frame_id);
4044 keep_going (ecs);
4045 return;
4046 }
4047 }
4048
4049 /* If we're in the return path from a shared library trampoline,
4050 we want to proceed through the trampoline when stepping. */
4051 if (gdbarch_in_solib_return_trampoline (gdbarch,
4052 stop_pc, ecs->stop_func_name))
4053 {
4054 /* Determine where this trampoline returns. */
4055 CORE_ADDR real_stop_pc;
4056 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4057
4058 if (debug_infrun)
4059 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4060
4061 /* Only proceed through if we know where it's going. */
4062 if (real_stop_pc)
4063 {
4064 /* And put the step-breakpoint there and go until there. */
4065 struct symtab_and_line sr_sal;
4066
4067 init_sal (&sr_sal); /* initialize to zeroes */
4068 sr_sal.pc = real_stop_pc;
4069 sr_sal.section = find_pc_overlay (sr_sal.pc);
4070
4071 /* Do not specify what the fp should be when we stop since
4072 on some machines the prologue is where the new fp value
4073 is established. */
4074 insert_step_resume_breakpoint_at_sal (gdbarch,
4075 sr_sal, null_frame_id);
4076
4077 /* Restart without fiddling with the step ranges or
4078 other state. */
4079 keep_going (ecs);
4080 return;
4081 }
4082 }
4083
4084 stop_pc_sal = find_pc_line (stop_pc, 0);
4085
4086 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4087 the trampoline processing logic; however, there are some trampolines
4088 that have no names, so we should do trampoline handling first. */
4089 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4090 && ecs->stop_func_name == NULL
4091 && stop_pc_sal.line == 0)
4092 {
4093 if (debug_infrun)
4094 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4095
4096 /* The inferior just stepped into, or returned to, an
4097 undebuggable function (where there is no debugging information
4098 and no line number corresponding to the address where the
4099 inferior stopped). Since we want to skip this kind of code,
4100 we keep going until the inferior returns from this
4101 function - unless the user has asked us not to (via
4102 set step-mode) or we no longer know how to get back
4103 to the call site. */
4104 if (step_stop_if_no_debug
4105 || !frame_id_p (frame_unwind_caller_id (frame)))
4106 {
4107 /* If we have no line number and the step-stop-if-no-debug
4108 is set, we stop the step so that the user has a chance to
4109 switch to assembly mode. */
4110 ecs->event_thread->stop_step = 1;
4111 print_stop_reason (END_STEPPING_RANGE, 0);
4112 stop_stepping (ecs);
4113 return;
4114 }
4115 else
4116 {
4117 /* Set a breakpoint at callee's return address (the address
4118 at which the caller will resume). */
4119 insert_step_resume_breakpoint_at_caller (frame);
4120 keep_going (ecs);
4121 return;
4122 }
4123 }
4124
4125 if (ecs->event_thread->step_range_end == 1)
4126 {
4127 /* It is stepi or nexti. We always want to stop stepping after
4128 one instruction. */
4129 if (debug_infrun)
4130 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4131 ecs->event_thread->stop_step = 1;
4132 print_stop_reason (END_STEPPING_RANGE, 0);
4133 stop_stepping (ecs);
4134 return;
4135 }
4136
4137 if (stop_pc_sal.line == 0)
4138 {
4139 /* We have no line number information. That means to stop
4140 stepping (does this always happen right after one instruction,
4141 when we do "s" in a function with no line numbers,
4142 or can this happen as a result of a return or longjmp?). */
4143 if (debug_infrun)
4144 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4145 ecs->event_thread->stop_step = 1;
4146 print_stop_reason (END_STEPPING_RANGE, 0);
4147 stop_stepping (ecs);
4148 return;
4149 }
4150
4151 /* Look for "calls" to inlined functions, part one. If the inline
4152 frame machinery detected some skipped call sites, we have entered
4153 a new inline function. */
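/* Source-level sketch (hypothetical): with an inlined "sq" called
   from the line "y = sq (a);", a "step" that stops here with skipped
   call sites on the same line presents the inlined sq frame before
   stopping, while a call site on a different line makes us stop at
   the call site itself; a "next" in the same-line case just keeps
   going past the inlined body.  */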
4154
4155 if (frame_id_eq (get_frame_id (get_current_frame ()),
4156 ecs->event_thread->step_frame_id)
4157 && inline_skipped_frames (ecs->ptid))
4158 {
4159 struct symtab_and_line call_sal;
4160
4161 if (debug_infrun)
4162 fprintf_unfiltered (gdb_stdlog,
4163 "infrun: stepped into inlined function\n");
4164
4165 find_frame_sal (get_current_frame (), &call_sal);
4166
4167 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4168 {
4169 /* For "step", we're going to stop. But if the call site
4170 for this inlined function is on the same source line as
4171 we were previously stepping, go down into the function
4172 first. Otherwise stop at the call site. */
4173
4174 if (call_sal.line == ecs->event_thread->current_line
4175 && call_sal.symtab == ecs->event_thread->current_symtab)
4176 step_into_inline_frame (ecs->ptid);
4177
4178 ecs->event_thread->stop_step = 1;
4179 print_stop_reason (END_STEPPING_RANGE, 0);
4180 stop_stepping (ecs);
4181 return;
4182 }
4183 else
4184 {
4185 /* For "next", we should stop at the call site if it is on a
4186 different source line. Otherwise continue through the
4187 inlined function. */
4188 if (call_sal.line == ecs->event_thread->current_line
4189 && call_sal.symtab == ecs->event_thread->current_symtab)
4190 keep_going (ecs);
4191 else
4192 {
4193 ecs->event_thread->stop_step = 1;
4194 print_stop_reason (END_STEPPING_RANGE, 0);
4195 stop_stepping (ecs);
4196 }
4197 return;
4198 }
4199 }
4200
4201 /* Look for "calls" to inlined functions, part two. If we are still
4202 in the same real function we were stepping through, but we have
4203 to go further up to find the exact frame ID, we are stepping
4204 through a more inlined call beyond its call site. */
4205
4206 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4207 && !frame_id_eq (get_frame_id (get_current_frame ()),
4208 ecs->event_thread->step_frame_id)
4209 && stepped_in_from (get_current_frame (),
4210 ecs->event_thread->step_frame_id))
4211 {
4212 if (debug_infrun)
4213 fprintf_unfiltered (gdb_stdlog,
4214 "infrun: stepping through inlined function\n");
4215
4216 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4217 keep_going (ecs);
4218 else
4219 {
4220 ecs->event_thread->stop_step = 1;
4221 print_stop_reason (END_STEPPING_RANGE, 0);
4222 stop_stepping (ecs);
4223 }
4224 return;
4225 }
4226
4227 if ((stop_pc == stop_pc_sal.pc)
4228 && (ecs->event_thread->current_line != stop_pc_sal.line
4229 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4230 {
4231 /* We are at the start of a different line. So stop. Note that
4232 we don't stop if we step into the middle of a different line.
4233 That is said to make things like for (;;) statements work
4234 better. */
4235 if (debug_infrun)
4236 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4237 ecs->event_thread->stop_step = 1;
4238 print_stop_reason (END_STEPPING_RANGE, 0);
4239 stop_stepping (ecs);
4240 return;
4241 }
4242
4243 /* We aren't done stepping.
4244
4245 Optimize by setting the stepping range to the line.
4246 (We might not be in the original line, but if we entered a
4247 new line in mid-statement, we continue stepping. This makes
4248 things like for(;;) statements work better.) */
4249
4250 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4251 ecs->event_thread->step_range_end = stop_pc_sal.end;
4252 set_step_info (frame, stop_pc_sal);
4253
4254 if (debug_infrun)
4255 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4256 keep_going (ecs);
4257 }
4258
4259 /* Is thread TP in the middle of single-stepping? */
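/* A thread counts as stepping when it has an active step range with
   no step-resume breakpoint outstanding, when it is being
   single-stepped past a breakpoint (trap_expected), when it is
   stepping out of the dynamic linker after a shlib catchpoint, or
   when something such as a software watchpoint forces
   single-stepping (bpstat_should_step).  */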
4260
4261 static int
4262 currently_stepping (struct thread_info *tp)
4263 {
4264 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4265 || tp->trap_expected
4266 || tp->stepping_through_solib_after_catch
4267 || bpstat_should_step ());
4268 }
4269
4270 /* Returns true if any thread *but* the one passed in "data" is in the
4271 middle of stepping or of handling a "next". */
4272
4273 static int
4274 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4275 {
4276 if (tp == data)
4277 return 0;
4278
4279 return (tp->step_range_end
4280 || tp->trap_expected
4281 || tp->stepping_through_solib_after_catch);
4282 }
4283
4284 /* Inferior has stepped into a subroutine call with source code that
4285 we should not step over. Do step to the first line of code in
4286 it. */
4287
4288 static void
4289 handle_step_into_function (struct gdbarch *gdbarch,
4290 struct execution_control_state *ecs)
4291 {
4292 struct symtab *s;
4293 struct symtab_and_line stop_func_sal, sr_sal;
4294
4295 s = find_pc_symtab (stop_pc);
4296 if (s && s->language != language_asm)
4297 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4298 ecs->stop_func_start);
4299
4300 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4301 /* Use the step_resume_break to step until the end of the prologue,
4302 even if that involves jumps (as it seems to on the vax under
4303 4.2). */
4304 /* If the prologue ends in the middle of a source line, continue to
4305 the end of that source line (if it is still within the function).
4306 Otherwise, just go to end of prologue. */
4307 if (stop_func_sal.end
4308 && stop_func_sal.pc != ecs->stop_func_start
4309 && stop_func_sal.end < ecs->stop_func_end)
4310 ecs->stop_func_start = stop_func_sal.end;
4311
4312 /* Architectures which require breakpoint adjustment might not be able
4313 to place a breakpoint at the computed address. If so, the test
4314 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4315 ecs->stop_func_start to an address at which a breakpoint may be
4316 legitimately placed.
4317
4318 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4319 made, GDB will enter an infinite loop when stepping through
4320 optimized code consisting of VLIW instructions which contain
4321 subinstructions corresponding to different source lines. On
4322 FR-V, it's not permitted to place a breakpoint on any but the
4323 first subinstruction of a VLIW instruction. When a breakpoint is
4324 set, GDB will adjust the breakpoint address to the beginning of
4325 the VLIW instruction. Thus, we need to make the corresponding
4326 adjustment here when computing the stop address. */
4327
4328 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4329 {
4330 ecs->stop_func_start
4331 = gdbarch_adjust_breakpoint_address (gdbarch,
4332 ecs->stop_func_start);
4333 }
4334
4335 if (ecs->stop_func_start == stop_pc)
4336 {
4337 /* We are already there: stop now. */
4338 ecs->event_thread->stop_step = 1;
4339 print_stop_reason (END_STEPPING_RANGE, 0);
4340 stop_stepping (ecs);
4341 return;
4342 }
4343 else
4344 {
4345 /* Put the step-breakpoint there and go until there. */
4346 init_sal (&sr_sal); /* initialize to zeroes */
4347 sr_sal.pc = ecs->stop_func_start;
4348 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4349
4350 /* Do not specify what the fp should be when we stop since on
4351 some machines the prologue is where the new fp value is
4352 established. */
4353 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4354
4355 /* And make sure stepping stops right away then. */
4356 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4357 }
4358 keep_going (ecs);
4359 }
4360
4361 /* Inferior has stepped backward into a subroutine call with source
4362 code that we should not step over. Do step to the beginning of the
4363 last line of code in it. */
4364
4365 static void
4366 handle_step_into_function_backward (struct gdbarch *gdbarch,
4367 struct execution_control_state *ecs)
4368 {
4369 struct symtab *s;
4370 struct symtab_and_line stop_func_sal, sr_sal;
4371
4372 s = find_pc_symtab (stop_pc);
4373 if (s && s->language != language_asm)
4374 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4375 ecs->stop_func_start);
4376
4377 stop_func_sal = find_pc_line (stop_pc, 0);
4378
4379 /* OK, we're just going to keep stepping here. */
4380 if (stop_func_sal.pc == stop_pc)
4381 {
4382 /* We're there already. Just stop stepping now. */
4383 ecs->event_thread->stop_step = 1;
4384 print_stop_reason (END_STEPPING_RANGE, 0);
4385 stop_stepping (ecs);
4386 }
4387 else
4388 {
4389 /* Else just reset the step range and keep going.
4390 No step-resume breakpoint, they don't work for
4391 epilogues, which can have multiple entry paths. */
4392 ecs->event_thread->step_range_start = stop_func_sal.pc;
4393 ecs->event_thread->step_range_end = stop_func_sal.end;
4394 keep_going (ecs);
4395 }
4396 return;
4397 }
4398
4399 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4400 This is used both to skip over functions and to skip over other code. */
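/* For instance, a "next" over a call plants one at the caller's
   resume address via insert_step_resume_breakpoint_at_caller, while
   a signal that arrives mid-step plants one at the interrupted PC
   via insert_step_resume_breakpoint_at_frame.  */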
4401
4402 static void
4403 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4404 struct symtab_and_line sr_sal,
4405 struct frame_id sr_id)
4406 {
4407 /* There should never be more than one step-resume or longjmp-resume
4408 breakpoint per thread, so we should never be setting a new
4409 step_resume_breakpoint when one is already active. */
4410 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4411
4412 if (debug_infrun)
4413 fprintf_unfiltered (gdb_stdlog,
4414 "infrun: inserting step-resume breakpoint at %s\n",
4415 paddress (gdbarch, sr_sal.pc));
4416
4417 inferior_thread ()->step_resume_breakpoint
4418 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4419 }
4420
4421 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4422 to skip a potential signal handler.
4423
4424 This is called with the interrupted function's frame. The signal
4425 handler, when it returns, will resume the interrupted function at
4426 RETURN_FRAME.pc. */
4427
4428 static void
4429 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4430 {
4431 struct symtab_and_line sr_sal;
4432 struct gdbarch *gdbarch;
4433
4434 gdb_assert (return_frame != NULL);
4435 init_sal (&sr_sal); /* initialize to zeros */
4436
4437 gdbarch = get_frame_arch (return_frame);
4438 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4439 sr_sal.section = find_pc_overlay (sr_sal.pc);
4440
4441 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4442 get_stack_frame_id (return_frame));
4443 }
4444
4445 /* Similar to insert_step_resume_breakpoint_at_frame, except
4446 that it sets the breakpoint at the previous frame's PC. This is used to
4447 skip a function after stepping into it (for "next" or if the called
4448 function has no debugging information).
4449
4450 The current function has almost always been reached by single
4451 stepping a call or return instruction. NEXT_FRAME belongs to the
4452 current function, and the breakpoint will be set at the caller's
4453 resume address.
4454
4455 This is a separate function rather than reusing
4456 insert_step_resume_breakpoint_at_frame in order to avoid
4457 get_prev_frame, which may stop prematurely (see the implementation
4458 of frame_unwind_caller_id for an example). */
4459
4460 static void
4461 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4462 {
4463 struct symtab_and_line sr_sal;
4464 struct gdbarch *gdbarch;
4465
4466 /* We shouldn't have gotten here if we don't know where the call site
4467 is. */
4468 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4469
4470 init_sal (&sr_sal); /* initialize to zeros */
4471
4472 gdbarch = frame_unwind_caller_arch (next_frame);
4473 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4474 frame_unwind_caller_pc (next_frame));
4475 sr_sal.section = find_pc_overlay (sr_sal.pc);
4476
4477 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4478 frame_unwind_caller_id (next_frame));
4479 }
4480
4481 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4482 new breakpoint at the target of a jmp_buf. The handling of
4483 longjmp-resume uses the same mechanisms used for handling
4484 "step-resume" breakpoints. */
4485
4486 static void
4487 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4488 {
4489 /* There should never be more than one step-resume or longjmp-resume
4490 breakpoint per thread, so we should never be setting a new
4491 longjmp_resume_breakpoint when one is already active. */
4492 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4493
4494 if (debug_infrun)
4495 fprintf_unfiltered (gdb_stdlog,
4496 "infrun: inserting longjmp-resume breakpoint at %s\n",
4497 paddress (gdbarch, pc));
4498
4499 inferior_thread ()->step_resume_breakpoint =
4500 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4501 }
4502
4503 static void
4504 stop_stepping (struct execution_control_state *ecs)
4505 {
4506 if (debug_infrun)
4507 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4508
4509 /* Let callers know we don't want to wait for the inferior anymore. */
4510 ecs->wait_some_more = 0;
4511 }
4512
4513 /* This function handles various cases where we need to continue
4514 waiting for the inferior. */
4515 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4516
4517 static void
4518 keep_going (struct execution_control_state *ecs)
4519 {
4520 /* Save the pc before execution, to compare with pc after stop. */
4521 ecs->event_thread->prev_pc
4522 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4523
4524 /* If we did not do break;, it means we should keep running the
4525 inferior and not return to the debugger. */
4526
4527 if (ecs->event_thread->trap_expected
4528 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4529 {
4530 /* We took a signal (which we are supposed to pass through to
4531 the inferior, else we'd not get here) and we haven't yet
4532 gotten our trap. Simply continue. */
4533 resume (currently_stepping (ecs->event_thread),
4534 ecs->event_thread->stop_signal);
4535 }
4536 else
4537 {
4538 /* Either the trap was not expected, but we are continuing
4539 anyway (the user asked that this signal be passed to the
4540 child)
4541 -- or --
4542 The signal was SIGTRAP, i.e. it was our signal, but we
4543 decided we should resume from it.
4544
4545 We're going to run this baby now!
4546
4547 Note that insert_breakpoints won't try to re-insert
4548 already inserted breakpoints. Therefore, we don't
4549 care if breakpoints were already inserted, or not. */
4550
4551 if (ecs->event_thread->stepping_over_breakpoint)
4552 {
4553 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4554 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4555 /* Since we can't do a displaced step, we have to remove
4556 the breakpoint while we step it. To keep things
4557 simple, we remove them all. */
4558 remove_breakpoints ();
4559 }
4560 else
4561 {
4562 struct gdb_exception e;
4563 /* Stop stepping when inserting breakpoints
4564 has failed. */
4565 TRY_CATCH (e, RETURN_MASK_ERROR)
4566 {
4567 insert_breakpoints ();
4568 }
4569 if (e.reason < 0)
4570 {
4571 stop_stepping (ecs);
4572 return;
4573 }
4574 }
4575
4576 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4577
4578 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4579 specifies that such a signal should be delivered to the
4580 target program).
4581
4582 Typically, this would occur when a user is debugging a
4583 target monitor on a simulator: the target monitor sets a
4584 breakpoint; the simulator encounters this breakpoint and
4585 halts the simulation, handing control to GDB; GDB, noting
4586 that the breakpoint isn't valid, returns control back to the
4587 simulator; the simulator then delivers the hardware
4588 equivalent of a SIGNAL_TRAP to the program being debugged. */
4589
4590 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4591 && !signal_program[ecs->event_thread->stop_signal])
4592 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4593
4594 resume (currently_stepping (ecs->event_thread),
4595 ecs->event_thread->stop_signal);
4596 }
4597
4598 prepare_to_wait (ecs);
4599 }
4600
4601 /* This function normally comes after a resume, before
4602 handle_inferior_event exits. It takes care of any last bits of
4603 housekeeping, and sets the all-important wait_some_more flag. */
4604
4605 static void
4606 prepare_to_wait (struct execution_control_state *ecs)
4607 {
4608 if (debug_infrun)
4609 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4610
4611 /* This is the old end of the while loop. Let everybody know we
4612 want to wait for the inferior some more and get called again
4613 soon. */
4614 ecs->wait_some_more = 1;
4615 }
4616
4617 /* Print why the inferior has stopped. We always print something when
4618 the inferior exits, or receives a signal. The rest of the cases are
4619 dealt with later on in normal_stop() and print_it_typical(). Ideally
4620 there should be a call to this function from handle_inferior_event()
4621 each time stop_stepping() is called. */
4622 static void
4623 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4624 {
4625 switch (stop_reason)
4626 {
4627 case END_STEPPING_RANGE:
4628 /* We are done with a step/next/si/ni command. */
4629 /* For now print nothing. */
4630 /* Print a message only if not in the middle of doing a "step n"
4631 operation for n > 1 */
4632 if (!inferior_thread ()->step_multi
4633 || !inferior_thread ()->stop_step)
4634 if (ui_out_is_mi_like_p (uiout))
4635 ui_out_field_string
4636 (uiout, "reason",
4637 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4638 break;
4639 case SIGNAL_EXITED:
4640 /* The inferior was terminated by a signal. */
4641 annotate_signalled ();
4642 if (ui_out_is_mi_like_p (uiout))
4643 ui_out_field_string
4644 (uiout, "reason",
4645 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4646 ui_out_text (uiout, "\nProgram terminated with signal ");
4647 annotate_signal_name ();
4648 ui_out_field_string (uiout, "signal-name",
4649 target_signal_to_name (stop_info));
4650 annotate_signal_name_end ();
4651 ui_out_text (uiout, ", ");
4652 annotate_signal_string ();
4653 ui_out_field_string (uiout, "signal-meaning",
4654 target_signal_to_string (stop_info));
4655 annotate_signal_string_end ();
4656 ui_out_text (uiout, ".\n");
4657 ui_out_text (uiout, "The program no longer exists.\n");
4658 break;
4659 case EXITED:
4660 /* The inferior program is finished. */
4661 annotate_exited (stop_info);
4662 if (stop_info)
4663 {
4664 if (ui_out_is_mi_like_p (uiout))
4665 ui_out_field_string (uiout, "reason",
4666 async_reason_lookup (EXEC_ASYNC_EXITED));
4667 ui_out_text (uiout, "\nProgram exited with code ");
4668 ui_out_field_fmt (uiout, "exit-code", "0%o",
4669 (unsigned int) stop_info);
4670 ui_out_text (uiout, ".\n");
4671 }
4672 else
4673 {
4674 if (ui_out_is_mi_like_p (uiout))
4675 ui_out_field_string
4676 (uiout, "reason",
4677 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
4678 ui_out_text (uiout, "\nProgram exited normally.\n");
4679 }
4680 /* Support the --return-child-result option. */
4681 return_child_result_value = stop_info;
4682 break;
4683 case SIGNAL_RECEIVED:
4684 /* Signal received. The signal table tells us to print about
4685 it. */
4686 annotate_signal ();
4687
4688 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
4689 {
4690 struct thread_info *t = inferior_thread ();
4691
4692 ui_out_text (uiout, "\n[");
4693 ui_out_field_string (uiout, "thread-name",
4694 target_pid_to_str (t->ptid));
4695 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
4696 ui_out_text (uiout, " stopped");
4697 }
4698 else
4699 {
4700 ui_out_text (uiout, "\nProgram received signal ");
4701 annotate_signal_name ();
4702 if (ui_out_is_mi_like_p (uiout))
4703 ui_out_field_string
4704 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
4705 ui_out_field_string (uiout, "signal-name",
4706 target_signal_to_name (stop_info));
4707 annotate_signal_name_end ();
4708 ui_out_text (uiout, ", ");
4709 annotate_signal_string ();
4710 ui_out_field_string (uiout, "signal-meaning",
4711 target_signal_to_string (stop_info));
4712 annotate_signal_string_end ();
4713 }
4714 ui_out_text (uiout, ".\n");
4715 break;
4716 case NO_HISTORY:
4717 /* Reverse execution: target ran out of history info. */
4718 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
4719 break;
4720 default:
4721 internal_error (__FILE__, __LINE__,
4722 _("print_stop_reason: unrecognized enum value"));
4723 break;
4724 }
4725 }
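/* In MI mode the "reason" fields emitted above surface in the async
   stop record, e.g. something along the lines of
       *stopped,reason="end-stepping-range",thread-id="1",...
   whereas the CLI only sees the plain text messages.  */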
4726 \f
4727
4728 /* Here to return control to GDB when the inferior stops for real.
4729 Print appropriate messages, remove breakpoints, give terminal our modes.
4730
4731 STOP_PRINT_FRAME nonzero means print the executing frame
4732 (pc, function, args, file, line number and line text).
4733 BREAKPOINTS_FAILED nonzero means stop was due to error
4734 attempting to insert breakpoints. */
4735
4736 void
4737 normal_stop (void)
4738 {
4739 struct target_waitstatus last;
4740 ptid_t last_ptid;
4741 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
4742
4743 get_last_target_status (&last_ptid, &last);
4744
4745 /* If an exception is thrown from this point on, make sure to
4746 propagate GDB's knowledge of the executing state to the
4747 frontend/user running state. A QUIT is an easy exception to see
4748 here, so do this before any filtered output. */
4749 if (!non_stop)
4750 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
4751 else if (last.kind != TARGET_WAITKIND_SIGNALLED
4752 && last.kind != TARGET_WAITKIND_EXITED)
4753 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
4754
4755 /* In non-stop mode, we don't want GDB to switch threads behind the
4756 user's back, to avoid races where the user is typing a command to
4757 apply to thread x, but GDB switches to thread y before the user
4758 finishes entering the command. */
4759
4760 /* As with the notification of thread events, we want to delay
4761 notifying the user that we've switched thread context until
4762 the inferior actually stops.
4763
4764 There's no point in saying anything if the inferior has exited.
4765 Note that SIGNALLED here means "exited with a signal", not
4766 "received a signal". */
4767 if (!non_stop
4768 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
4769 && target_has_execution
4770 && last.kind != TARGET_WAITKIND_SIGNALLED
4771 && last.kind != TARGET_WAITKIND_EXITED)
4772 {
4773 target_terminal_ours_for_output ();
4774 printf_filtered (_("[Switching to %s]\n"),
4775 target_pid_to_str (inferior_ptid));
4776 annotate_thread_changed ();
4777 previous_inferior_ptid = inferior_ptid;
4778 }
4779
4780 if (!breakpoints_always_inserted_mode () && target_has_execution)
4781 {
4782 if (remove_breakpoints ())
4783 {
4784 target_terminal_ours_for_output ();
4785 printf_filtered (_("\
4786 Cannot remove breakpoints because program is no longer writable.\n\
4787 Further execution is probably impossible.\n"));
4788 }
4789 }
4790
4791 /* If an auto-display called a function and that got a signal,
4792 delete that auto-display to avoid an infinite recursion. */
4793
4794 if (stopped_by_random_signal)
4795 disable_current_display ();
4796
4797 /* Don't print a message if in the middle of doing a "step n"
4798 operation for n > 1 */
4799 if (target_has_execution
4800 && last.kind != TARGET_WAITKIND_SIGNALLED
4801 && last.kind != TARGET_WAITKIND_EXITED
4802 && inferior_thread ()->step_multi
4803 && inferior_thread ()->stop_step)
4804 goto done;
4805
4806 target_terminal_ours ();
4807
4808 /* Set the current source location. This will also happen if we
4809 display the frame below, but the current SAL will be incorrect
4810 during a user hook-stop function. */
4811 if (has_stack_frames () && !stop_stack_dummy)
4812 set_current_sal_from_frame (get_current_frame (), 1);
4813
4814 /* Let the user/frontend see the threads as stopped. */
4815 do_cleanups (old_chain);
4816
4817 /* Look up the hook_stop and run it (CLI internally handles problem
4818 of stop_command's pre-hook not existing). */
4819 if (stop_command)
4820 catch_errors (hook_stop_stub, stop_command,
4821 "Error while running hook_stop:\n", RETURN_MASK_ALL);
4822
4823 if (!has_stack_frames ())
4824 goto done;
4825
4826 if (last.kind == TARGET_WAITKIND_SIGNALLED
4827 || last.kind == TARGET_WAITKIND_EXITED)
4828 goto done;
4829
4830 /* Select innermost stack frame - i.e., current frame is frame 0,
4831 and current location is based on that.
4832 Don't do this on return from a stack dummy routine,
4833 or if the program has exited. */
4834
4835 if (!stop_stack_dummy)
4836 {
4837 select_frame (get_current_frame ());
4838
4839 /* Print current location without a level number, if
4840 we have changed functions or hit a breakpoint.
4841 Print source line if we have one.
4842 bpstat_print() contains the logic deciding in detail
4843 what to print, based on the event(s) that just occurred. */
4844
4845 /* If --batch-silent is enabled then there's no need to print the
4846 current source location, and trying to do so risks causing an error
4847 message about missing source files. */
4848 if (stop_print_frame && !batch_silent)
4849 {
4850 int bpstat_ret;
4851 int source_flag;
4852 int do_frame_printing = 1;
4853 struct thread_info *tp = inferior_thread ();
4854
4855 bpstat_ret = bpstat_print (tp->stop_bpstat);
4856 switch (bpstat_ret)
4857 {
4858 case PRINT_UNKNOWN:
4859 /* If we had hit a shared library event breakpoint,
4860 bpstat_print would print out this message. If we hit
4861 an OS-level shared library event, do the same
4862 thing. */
4863 if (last.kind == TARGET_WAITKIND_LOADED)
4864 {
4865 printf_filtered (_("Stopped due to shared library event\n"));
4866 source_flag = SRC_LINE; /* something bogus */
4867 do_frame_printing = 0;
4868 break;
4869 }
4870
4871 /* FIXME: cagney/2002-12-01: Given that a frame ID does
4872 (or should) carry around the function and does (or
4873 should) use that when doing a frame comparison. */
4874 if (tp->stop_step
4875 && frame_id_eq (tp->step_frame_id,
4876 get_frame_id (get_current_frame ()))
4877 && step_start_function == find_pc_function (stop_pc))
4878 source_flag = SRC_LINE; /* finished step, just print source line */
4879 else
4880 source_flag = SRC_AND_LOC; /* print location and source line */
4881 break;
4882 case PRINT_SRC_AND_LOC:
4883 source_flag = SRC_AND_LOC; /* print location and source line */
4884 break;
4885 case PRINT_SRC_ONLY:
4886 source_flag = SRC_LINE;
4887 break;
4888 case PRINT_NOTHING:
4889 source_flag = SRC_LINE; /* something bogus */
4890 do_frame_printing = 0;
4891 break;
4892 default:
4893 internal_error (__FILE__, __LINE__, _("Unknown value."));
4894 }
4895
4896 /* The behavior of this routine with respect to the source
4897 flag is:
4898 SRC_LINE: Print only source line
4899 LOCATION: Print only location
4900 SRC_AND_LOC: Print location and source line */
4901 if (do_frame_printing)
4902 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
4903
4904 /* Display the auto-display expressions. */
4905 do_displays ();
4906 }
4907 }
4908
4909 /* Save the function value return registers, if we care.
4910 We might be about to restore their previous contents. */
4911 if (inferior_thread ()->proceed_to_finish)
4912 {
4913 /* This should not be necessary. */
4914 if (stop_registers)
4915 regcache_xfree (stop_registers);
4916
4917 /* NB: The copy goes through to the target picking up the value of
4918 all the registers. */
4919 stop_registers = regcache_dup (get_current_regcache ());
4920 }
4921
4922 if (stop_stack_dummy)
4923 {
4924 /* Pop the empty frame that contains the stack dummy.
4925 This also restores inferior state prior to the call
4926 (struct inferior_thread_state). */
4927 struct frame_info *frame = get_current_frame ();
4928 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
4929 frame_pop (frame);
4930 /* frame_pop() calls reinit_frame_cache as the last thing it does
4931 which means there's currently no selected frame. We don't need
4932 to re-establish a selected frame if the dummy call returns normally,
4933 that will be done by restore_inferior_status. However, we do have
4934 to handle the case where the dummy call is returning after being
4935 stopped (e.g. the dummy call previously hit a breakpoint). We
4936 can't know which case we have so just always re-establish a
4937 selected frame here. */
4938 select_frame (get_current_frame ());
4939 }
4940
4941 done:
4942 annotate_stopped ();
4943
4944 /* Suppress the stop observer if we're in the middle of:
4945
4946 - a step n (n > 1), as there are still more steps to be done.
4947
4948 - a "finish" command, as the observer will be called in
4949 finish_command_continuation, so it can include the inferior
4950 function's return value.
4951
4952 - calling an inferior function, as we pretend the inferior didn't
4953 run at all. The return value of the call is handled by the
4954 expression evaluator, through call_function_by_hand. */
4955
4956 if (!target_has_execution
4957 || last.kind == TARGET_WAITKIND_SIGNALLED
4958 || last.kind == TARGET_WAITKIND_EXITED
4959 || (!inferior_thread ()->step_multi
4960 && !(inferior_thread ()->stop_bpstat
4961 && inferior_thread ()->proceed_to_finish)
4962 && !inferior_thread ()->in_infcall))
4963 {
4964 if (!ptid_equal (inferior_ptid, null_ptid))
4965 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
4966 stop_print_frame);
4967 else
4968 observer_notify_normal_stop (NULL, stop_print_frame);
4969 }
4970
4971 if (target_has_execution)
4972 {
4973 if (last.kind != TARGET_WAITKIND_SIGNALLED
4974 && last.kind != TARGET_WAITKIND_EXITED)
4975 /* Delete the breakpoint we stopped at, if it wants to be deleted.
4976 Delete any breakpoint that is to be deleted at the next stop. */
4977 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
4978 }
4979 }
4980
4981 static int
4982 hook_stop_stub (void *cmd)
4983 {
4984 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
4985 return (0);
4986 }
4987 \f
4988 int
4989 signal_stop_state (int signo)
4990 {
4991 return signal_stop[signo];
4992 }
4993
4994 int
4995 signal_print_state (int signo)
4996 {
4997 return signal_print[signo];
4998 }
4999
5000 int
5001 signal_pass_state (int signo)
5002 {
5003 return signal_program[signo];
5004 }
5005
5006 int
5007 signal_stop_update (int signo, int state)
5008 {
5009 int ret = signal_stop[signo];
5010 signal_stop[signo] = state;
5011 return ret;
5012 }
5013
5014 int
5015 signal_print_update (int signo, int state)
5016 {
5017 int ret = signal_print[signo];
5018 signal_print[signo] = state;
5019 return ret;
5020 }
5021
5022 int
5023 signal_pass_update (int signo, int state)
5024 {
5025 int ret = signal_program[signo];
5026 signal_program[signo] = state;
5027 return ret;
5028 }
5029
5030 static void
5031 sig_print_header (void)
5032 {
5033 printf_filtered (_("\
5034 Signal Stop\tPrint\tPass to program\tDescription\n"));
5035 }
5036
5037 static void
5038 sig_print_info (enum target_signal oursig)
5039 {
5040 const char *name = target_signal_to_name (oursig);
5041 int name_padding = 13 - strlen (name);
5042
5043 if (name_padding <= 0)
5044 name_padding = 0;
5045
5046 printf_filtered ("%s", name);
5047 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5048 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5049 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5050 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5051 printf_filtered ("%s\n", target_signal_to_string (oursig));
5052 }
5053
5054 /* Specify how various signals in the inferior should be handled. */
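
/* For example, hedged illustrations of the syntax accepted by
   handle_command below (the signal choices are arbitrary):

     handle SIGUSR1 nostop noprint pass
     handle SIGHUP 14-15 stop print
     handle all nopass

   Signal names, internal signal numbers, and LOW-HIGH ranges may be
   mixed with the action keywords; each action is performed for all
   signals cumulatively specified on the line.  */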
5055
5056 static void
5057 handle_command (char *args, int from_tty)
5058 {
5059 char **argv;
5060 int digits, wordlen;
5061 int sigfirst, signum, siglast;
5062 enum target_signal oursig;
5063 int allsigs;
5064 int nsigs;
5065 unsigned char *sigs;
5066 struct cleanup *old_chain;
5067
5068 if (args == NULL)
5069 {
5070 error_no_arg (_("signal to handle"));
5071 }
5072
5073 /* Allocate and zero an array of flags for which signals to handle. */
5074
5075 nsigs = (int) TARGET_SIGNAL_LAST;
5076 sigs = (unsigned char *) alloca (nsigs);
5077 memset (sigs, 0, nsigs);
5078
5079 /* Break the command line up into args. */
5080
5081 argv = gdb_buildargv (args);
5082 old_chain = make_cleanup_freeargv (argv);
5083
5084 /* Walk through the args, looking for signal numbers, signal names, and
5085 actions. Signal numbers and signal names may be interspersed with
5086 actions, with the actions being performed for all signals cumulatively
5087 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5088
5089 while (*argv != NULL)
5090 {
5091 wordlen = strlen (*argv);
5092 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5093 {;
5094 }
5095 allsigs = 0;
5096 sigfirst = siglast = -1;
5097
5098 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5099 {
5100 /* Apply action to all signals except those used by the
5101 debugger. Silently skip those. */
5102 allsigs = 1;
5103 sigfirst = 0;
5104 siglast = nsigs - 1;
5105 }
5106 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5107 {
5108 SET_SIGS (nsigs, sigs, signal_stop);
5109 SET_SIGS (nsigs, sigs, signal_print);
5110 }
5111 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5112 {
5113 UNSET_SIGS (nsigs, sigs, signal_program);
5114 }
5115 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5116 {
5117 SET_SIGS (nsigs, sigs, signal_print);
5118 }
5119 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5120 {
5121 SET_SIGS (nsigs, sigs, signal_program);
5122 }
5123 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5124 {
5125 UNSET_SIGS (nsigs, sigs, signal_stop);
5126 }
5127 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5128 {
5129 SET_SIGS (nsigs, sigs, signal_program);
5130 }
5131 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5132 {
5133 UNSET_SIGS (nsigs, sigs, signal_print);
5134 UNSET_SIGS (nsigs, sigs, signal_stop);
5135 }
5136 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5137 {
5138 UNSET_SIGS (nsigs, sigs, signal_program);
5139 }
5140 else if (digits > 0)
5141 {
5142 /* It is numeric. The numeric signal refers to our own
5143 internal signal numbering from target.h, not to host/target
5144 signal number. This is a feature; users really should be
5145 using symbolic names anyway, and the common ones like
5146 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5147
5148 sigfirst = siglast = (int)
5149 target_signal_from_command (atoi (*argv));
5150 if ((*argv)[digits] == '-')
5151 {
5152 siglast = (int)
5153 target_signal_from_command (atoi ((*argv) + digits + 1));
5154 }
5155 if (sigfirst > siglast)
5156 {
5157 /* The range was given high-to-low (e.g. "5-1"); swap the bounds. */
5158 signum = sigfirst;
5159 sigfirst = siglast;
5160 siglast = signum;
5161 }
5162 }
5163 else
5164 {
5165 oursig = target_signal_from_name (*argv);
5166 if (oursig != TARGET_SIGNAL_UNKNOWN)
5167 {
5168 sigfirst = siglast = (int) oursig;
5169 }
5170 else
5171 {
5172 /* Not a number and not a recognized flag word => complain. */
5173 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5174 }
5175 }
5176
5177 /* If any signal numbers or symbol names were found, set flags for
5178 which signals to apply actions to. */
5179
5180 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5181 {
5182 switch ((enum target_signal) signum)
5183 {
5184 case TARGET_SIGNAL_TRAP:
5185 case TARGET_SIGNAL_INT:
5186 if (!allsigs && !sigs[signum])
5187 {
5188 if (query (_("%s is used by the debugger.\n\
5189 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5190 {
5191 sigs[signum] = 1;
5192 }
5193 else
5194 {
5195 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5196 gdb_flush (gdb_stdout);
5197 }
5198 }
5199 break;
5200 case TARGET_SIGNAL_0:
5201 case TARGET_SIGNAL_DEFAULT:
5202 case TARGET_SIGNAL_UNKNOWN:
5203 /* Make sure that "all" doesn't print these. */
5204 break;
5205 default:
5206 sigs[signum] = 1;
5207 break;
5208 }
5209 }
5210
5211 argv++;
5212 }
5213
5214 for (signum = 0; signum < nsigs; signum++)
5215 if (sigs[signum])
5216 {
5217 target_notice_signals (inferior_ptid);
5218
5219 if (from_tty)
5220 {
5221 /* Show the results. */
5222 sig_print_header ();
5223 for (; signum < nsigs; signum++)
5224 if (sigs[signum])
5225 sig_print_info (signum);
5226 }
5227
5228 break;
5229 }
5230
5231 do_cleanups (old_chain);
5232 }
5233
5234 static void
5235 xdb_handle_command (char *args, int from_tty)
5236 {
5237 char **argv;
5238 struct cleanup *old_chain;
5239
5240 if (args == NULL)
5241 error_no_arg (_("xdb command"));
5242
5243 /* Break the command line up into args. */
5244
5245 argv = gdb_buildargv (args);
5246 old_chain = make_cleanup_freeargv (argv);
5247 if (argv[1] != (char *) NULL)
5248 {
5249 char *argBuf;
5250 int bufLen;
5251
5252 bufLen = strlen (argv[0]) + 20;
5253 argBuf = (char *) xmalloc (bufLen);
5254 if (argBuf)
5255 {
5256 int validFlag = 1;
5257 enum target_signal oursig;
5258
5259 oursig = target_signal_from_name (argv[0]);
5260 memset (argBuf, 0, bufLen);
5261 if (strcmp (argv[1], "Q") == 0)
5262 sprintf (argBuf, "%s %s", argv[0], "noprint");
5263 else
5264 {
5265 if (strcmp (argv[1], "s") == 0)
5266 {
5267 if (!signal_stop[oursig])
5268 sprintf (argBuf, "%s %s", argv[0], "stop");
5269 else
5270 sprintf (argBuf, "%s %s", argv[0], "nostop");
5271 }
5272 else if (strcmp (argv[1], "i") == 0)
5273 {
5274 if (!signal_program[oursig])
5275 sprintf (argBuf, "%s %s", argv[0], "pass");
5276 else
5277 sprintf (argBuf, "%s %s", argv[0], "nopass");
5278 }
5279 else if (strcmp (argv[1], "r") == 0)
5280 {
5281 if (!signal_print[oursig])
5282 sprintf (argBuf, "%s %s", argv[0], "print");
5283 else
5284 sprintf (argBuf, "%s %s", argv[0], "noprint");
5285 }
5286 else
5287 validFlag = 0;
5288 }
5289 if (validFlag)
5290 handle_command (argBuf, from_tty);
5291 else
5292 printf_filtered (_("Invalid signal handling flag.\n"));
5293 if (argBuf)
5294 xfree (argBuf);
5295 }
5296 }
5297 do_cleanups (old_chain);
5298 }
5299
5300 /* Print current contents of the tables set by the handle command.
5301 It is possible we should just be printing signals actually used
5302 by the current target (but for things to work right when switching
5303 targets, all signals should be in the signal tables). */
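
/* For example (hypothetical session):

     (gdb) info signals SIGSEGV
     (gdb) info signals

   The first form prints the table entry for a single signal; the
   second prints the whole table.  */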
5304
5305 static void
5306 signals_info (char *signum_exp, int from_tty)
5307 {
5308 enum target_signal oursig;
5309 sig_print_header ();
5310
5311 if (signum_exp)
5312 {
5313 /* First see if this is a symbol name. */
5314 oursig = target_signal_from_name (signum_exp);
5315 if (oursig == TARGET_SIGNAL_UNKNOWN)
5316 {
5317 /* No, try numeric. */
5318 oursig =
5319 target_signal_from_command (parse_and_eval_long (signum_exp));
5320 }
5321 sig_print_info (oursig);
5322 return;
5323 }
5324
5325 printf_filtered ("\n");
5326 /* These ugly casts brought to you by the native VAX compiler. */
5327 for (oursig = TARGET_SIGNAL_FIRST;
5328 (int) oursig < (int) TARGET_SIGNAL_LAST;
5329 oursig = (enum target_signal) ((int) oursig + 1))
5330 {
5331 QUIT;
5332
5333 if (oursig != TARGET_SIGNAL_UNKNOWN
5334 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5335 sig_print_info (oursig);
5336 }
5337
5338 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5339 }
5340
5341 /* The $_siginfo convenience variable is a bit special. We don't know
5342 for sure the type of the value until we actually have a chance to
5343 fetch the data. The type can change depending on gdbarch, so it is
5344 also dependent on which thread you have selected. This is handled by:
5345
5346 1. making $_siginfo be an internalvar that creates a new value on
5347 access.
5348
5349 2. making the value of $_siginfo be an lval_computed value. */
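
/* For example, on a target whose gdbarch supplies a siginfo type (the
   field names below are illustrative and follow the Linux layout):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   Such reads and writes are routed through siginfo_value_read and
   siginfo_value_write below.  */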
5350
5351 /* This function implements the lval_computed support for reading a
5352 $_siginfo value. */
5353
5354 static void
5355 siginfo_value_read (struct value *v)
5356 {
5357 LONGEST transferred;
5358
5359 transferred =
5360 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5361 NULL,
5362 value_contents_all_raw (v),
5363 value_offset (v),
5364 TYPE_LENGTH (value_type (v)));
5365
5366 if (transferred != TYPE_LENGTH (value_type (v)))
5367 error (_("Unable to read siginfo"));
5368 }
5369
5370 /* This function implements the lval_computed support for writing a
5371 $_siginfo value. */
5372
5373 static void
5374 siginfo_value_write (struct value *v, struct value *fromval)
5375 {
5376 LONGEST transferred;
5377
5378 transferred = target_write (&current_target,
5379 TARGET_OBJECT_SIGNAL_INFO,
5380 NULL,
5381 value_contents_all_raw (fromval),
5382 value_offset (v),
5383 TYPE_LENGTH (value_type (fromval)));
5384
5385 if (transferred != TYPE_LENGTH (value_type (fromval)))
5386 error (_("Unable to write siginfo"));
5387 }
5388
5389 static struct lval_funcs siginfo_value_funcs =
5390 {
5391 siginfo_value_read,
5392 siginfo_value_write
5393 };
5394
5395 /* Return a new value with the correct type for the siginfo object of
5396 the current thread using architecture GDBARCH. Return a void value
5397 if there's no object available. */
5398
5399 static struct value *
5400 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5401 {
5402 if (target_has_stack
5403 && !ptid_equal (inferior_ptid, null_ptid)
5404 && gdbarch_get_siginfo_type_p (gdbarch))
5405 {
5406 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5407 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5408 }
5409
5410 return allocate_value (builtin_type (gdbarch)->builtin_void);
5411 }
5412
5413 \f
5414 /* Inferior thread state.
5415 These are details related to the inferior itself, and don't include
5416 things like what frame the user had selected or what gdb was doing
5417 with the target at the time.
5418 For inferior function calls these are things we want to restore
5419 regardless of whether the function call successfully completes
5420 or the dummy frame has to be manually popped. */
5421
5422 struct inferior_thread_state
5423 {
5424 enum target_signal stop_signal;
5425 CORE_ADDR stop_pc;
5426 struct regcache *registers;
5427 };
5428
5429 struct inferior_thread_state *
5430 save_inferior_thread_state (void)
5431 {
5432 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5433 struct thread_info *tp = inferior_thread ();
5434
5435 inf_state->stop_signal = tp->stop_signal;
5436 inf_state->stop_pc = stop_pc;
5437
5438 inf_state->registers = regcache_dup (get_current_regcache ());
5439
5440 return inf_state;
5441 }
5442
5443 /* Restore inferior thread state from INF_STATE; INF_STATE is freed. */
5444
5445 void
5446 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5447 {
5448 struct thread_info *tp = inferior_thread ();
5449
5450 tp->stop_signal = inf_state->stop_signal;
5451 stop_pc = inf_state->stop_pc;
5452
5453 /* The inferior can be gone if the user types "print exit(0)"
5454 (and perhaps other times). */
5455 if (target_has_execution)
5456 /* NB: The register write goes through to the target. */
5457 regcache_cpy (get_current_regcache (), inf_state->registers);
5458 regcache_xfree (inf_state->registers);
5459 xfree (inf_state);
5460 }
5461
5462 static void
5463 do_restore_inferior_thread_state_cleanup (void *state)
5464 {
5465 restore_inferior_thread_state (state);
5466 }
5467
5468 struct cleanup *
5469 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5470 {
5471 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5472 }
5473
5474 void
5475 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5476 {
5477 regcache_xfree (inf_state->registers);
5478 xfree (inf_state);
5479 }
5480
5481 struct regcache *
5482 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5483 {
5484 return inf_state->registers;
5485 }
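
/* A hedged usage sketch for the functions above; the caller shown is
   a stand-in for something like an inferior function call:

     struct inferior_thread_state *saved = save_inferior_thread_state ();
     struct cleanup *chain
       = make_cleanup_restore_inferior_thread_state (saved);
     ... resume the inferior and wait for it to stop ...
     if (keep_new_state)
       {
         discard_cleanups (chain);
         discard_inferior_thread_state (saved);
       }
     else
       do_cleanups (chain);   -- restores registers, stop_pc, stop_signal

   KEEP_NEW_STATE is a hypothetical flag standing for whatever
   condition the caller uses to decide which state to keep.  */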
5486
5487 /* Session related state for inferior function calls.
5488 These are the additional bits of state that need to be restored
5489 when an inferior function call successfully completes. */
5490
5491 struct inferior_status
5492 {
5493 bpstat stop_bpstat;
5494 int stop_step;
5495 int stop_stack_dummy;
5496 int stopped_by_random_signal;
5497 int stepping_over_breakpoint;
5498 CORE_ADDR step_range_start;
5499 CORE_ADDR step_range_end;
5500 struct frame_id step_frame_id;
5501 struct frame_id step_stack_frame_id;
5502 enum step_over_calls_kind step_over_calls;
5503 CORE_ADDR step_resume_break_address;
5504 int stop_after_trap;
5505 int stop_soon;
5506
5507 /* ID of the selected frame when the inferior function call was made. */
5508 struct frame_id selected_frame_id;
5509
5510 int proceed_to_finish;
5511 int in_infcall;
5512 };
5513
5514 /* Save all of the information associated with the inferior<==>gdb
5515 connection. */
5516
5517 struct inferior_status *
5518 save_inferior_status (void)
5519 {
5520 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5521 struct thread_info *tp = inferior_thread ();
5522 struct inferior *inf = current_inferior ();
5523
5524 inf_status->stop_step = tp->stop_step;
5525 inf_status->stop_stack_dummy = stop_stack_dummy;
5526 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5527 inf_status->stepping_over_breakpoint = tp->trap_expected;
5528 inf_status->step_range_start = tp->step_range_start;
5529 inf_status->step_range_end = tp->step_range_end;
5530 inf_status->step_frame_id = tp->step_frame_id;
5531 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5532 inf_status->step_over_calls = tp->step_over_calls;
5533 inf_status->stop_after_trap = stop_after_trap;
5534 inf_status->stop_soon = inf->stop_soon;
5535 /* Save the original bpstat chain here; replace it with a copy of the chain.
5536 If caller's caller is walking the chain, they'll be happier if we
5537 hand them back the original chain when restore_inferior_status is
5538 called. */
5539 inf_status->stop_bpstat = tp->stop_bpstat;
5540 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5541 inf_status->proceed_to_finish = tp->proceed_to_finish;
5542 inf_status->in_infcall = tp->in_infcall;
5543
5544 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5545
5546 return inf_status;
5547 }
5548
5549 static int
5550 restore_selected_frame (void *args)
5551 {
5552 struct frame_id *fid = (struct frame_id *) args;
5553 struct frame_info *frame;
5554
5555 frame = frame_find_by_id (*fid);
5556
5557 /* If frame_find_by_id () fails, the frame saved in
5558 inf_status->selected_frame_id no longer exists on the stack. */
5559 if (frame == NULL)
5560 {
5561 warning (_("Unable to restore previously selected frame."));
5562 return 0;
5563 }
5564
5565 select_frame (frame);
5566
5567 return (1);
5568 }
5569
5570 /* Restore inferior session state to INF_STATUS. */
5571
5572 void
5573 restore_inferior_status (struct inferior_status *inf_status)
5574 {
5575 struct thread_info *tp = inferior_thread ();
5576 struct inferior *inf = current_inferior ();
5577
5578 tp->stop_step = inf_status->stop_step;
5579 stop_stack_dummy = inf_status->stop_stack_dummy;
5580 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5581 tp->trap_expected = inf_status->stepping_over_breakpoint;
5582 tp->step_range_start = inf_status->step_range_start;
5583 tp->step_range_end = inf_status->step_range_end;
5584 tp->step_frame_id = inf_status->step_frame_id;
5585 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5586 tp->step_over_calls = inf_status->step_over_calls;
5587 stop_after_trap = inf_status->stop_after_trap;
5588 inf->stop_soon = inf_status->stop_soon;
5589 bpstat_clear (&tp->stop_bpstat);
5590 tp->stop_bpstat = inf_status->stop_bpstat;
5591 inf_status->stop_bpstat = NULL;
5592 tp->proceed_to_finish = inf_status->proceed_to_finish;
5593 tp->in_infcall = inf_status->in_infcall;
5594
5595 if (target_has_stack)
5596 {
5597 /* The point of catch_errors is that if the stack is clobbered,
5598 walking the stack might encounter a garbage pointer and
5599 error() trying to dereference it. */
5600 if (catch_errors
5601 (restore_selected_frame, &inf_status->selected_frame_id,
5602 "Unable to restore previously selected frame:\n",
5603 RETURN_MASK_ERROR) == 0)
5604 /* Error in restoring the selected frame. Select the innermost
5605 frame. */
5606 select_frame (get_current_frame ());
5607 }
5608
5609 xfree (inf_status);
5610 }
5611
5612 static void
5613 do_restore_inferior_status_cleanup (void *sts)
5614 {
5615 restore_inferior_status (sts);
5616 }
5617
5618 struct cleanup *
5619 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5620 {
5621 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5622 }
5623
5624 void
5625 discard_inferior_status (struct inferior_status *inf_status)
5626 {
5627 /* See save_inferior_status for info on stop_bpstat. */
5628 bpstat_clear (&inf_status->stop_bpstat);
5629 xfree (inf_status);
5630 }
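
/* A hedged usage sketch mirroring the thread-state functions above
   (the caller shown is hypothetical):

     struct inferior_status *inf_status = save_inferior_status ();
     struct cleanup *chain
       = make_cleanup_restore_inferior_status (inf_status);
     ... run the inferior function ...
     do_cleanups (chain);   -- puts stepping state and the bpstat back

   A caller that instead wants to keep the current state would call
   discard_cleanups (chain) and then discard_inferior_status
   (inf_status), so that the saved copy is still freed.  */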
5631 \f
5632 int
5633 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5634 {
5635 struct target_waitstatus last;
5636 ptid_t last_ptid;
5637
5638 get_last_target_status (&last_ptid, &last);
5639
5640 if (last.kind != TARGET_WAITKIND_FORKED)
5641 return 0;
5642
5643 if (!ptid_equal (last_ptid, pid))
5644 return 0;
5645
5646 *child_pid = last.value.related_pid;
5647 return 1;
5648 }
5649
5650 int
5651 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
5652 {
5653 struct target_waitstatus last;
5654 ptid_t last_ptid;
5655
5656 get_last_target_status (&last_ptid, &last);
5657
5658 if (last.kind != TARGET_WAITKIND_VFORKED)
5659 return 0;
5660
5661 if (!ptid_equal (last_ptid, pid))
5662 return 0;
5663
5664 *child_pid = last.value.related_pid;
5665 return 1;
5666 }
5667
5668 int
5669 inferior_has_execd (ptid_t pid, char **execd_pathname)
5670 {
5671 struct target_waitstatus last;
5672 ptid_t last_ptid;
5673
5674 get_last_target_status (&last_ptid, &last);
5675
5676 if (last.kind != TARGET_WAITKIND_EXECD)
5677 return 0;
5678
5679 if (!ptid_equal (last_ptid, pid))
5680 return 0;
5681
5682 *execd_pathname = xstrdup (last.value.execd_pathname);
5683 return 1;
5684 }
5685
5686 int
5687 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
5688 {
5689 struct target_waitstatus last;
5690 ptid_t last_ptid;
5691
5692 get_last_target_status (&last_ptid, &last);
5693
5694 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
5695 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
5696 return 0;
5697
5698 if (!ptid_equal (last_ptid, pid))
5699 return 0;
5700
5701 *syscall_number = last.value.syscall_number;
5702 return 1;
5703 }
5704
5705 /* Oft-used ptids. */
5706 ptid_t null_ptid;
5707 ptid_t minus_one_ptid;
5708
5709 /* Create a ptid given the necessary PID, LWP, and TID components. */
5710
5711 ptid_t
5712 ptid_build (int pid, long lwp, long tid)
5713 {
5714 ptid_t ptid;
5715
5716 ptid.pid = pid;
5717 ptid.lwp = lwp;
5718 ptid.tid = tid;
5719 return ptid;
5720 }
5721
5722 /* Create a ptid from just a pid. */
5723
5724 ptid_t
5725 pid_to_ptid (int pid)
5726 {
5727 return ptid_build (pid, 0, 0);
5728 }
5729
5730 /* Fetch the pid (process id) component from a ptid. */
5731
5732 int
5733 ptid_get_pid (ptid_t ptid)
5734 {
5735 return ptid.pid;
5736 }
5737
5738 /* Fetch the lwp (lightweight process) component from a ptid. */
5739
5740 long
5741 ptid_get_lwp (ptid_t ptid)
5742 {
5743 return ptid.lwp;
5744 }
5745
5746 /* Fetch the tid (thread id) component from a ptid. */
5747
5748 long
5749 ptid_get_tid (ptid_t ptid)
5750 {
5751 return ptid.tid;
5752 }
5753
5754 /* ptid_equal() is used to test equality of two ptids. */
5755
5756 int
5757 ptid_equal (ptid_t ptid1, ptid_t ptid2)
5758 {
5759 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
5760 && ptid1.tid == ptid2.tid);
5761 }
5762
5763 /* Returns true if PTID represents a process. */
5764
5765 int
5766 ptid_is_pid (ptid_t ptid)
5767 {
5768 if (ptid_equal (minus_one_ptid, ptid))
5769 return 0;
5770 if (ptid_equal (null_ptid, ptid))
5771 return 0;
5772
5773 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
5774 }
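
/* Hedged example of composing and inspecting a ptid with the
   constructors and accessors above (the numbers are arbitrary):

     ptid_t p = ptid_build (1234, 5678, 0);
     gdb_assert (ptid_get_pid (p) == 1234 && ptid_get_lwp (p) == 5678);
     gdb_assert (!ptid_is_pid (p));
     gdb_assert (ptid_is_pid (pid_to_ptid (1234)));  */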
5775
5776 /* restore_inferior_ptid() will be used by the cleanup machinery
5777 to restore the inferior_ptid value saved in a call to
5778 save_inferior_ptid(). */
5779
5780 static void
5781 restore_inferior_ptid (void *arg)
5782 {
5783 ptid_t *saved_ptid_ptr = arg;
5784 inferior_ptid = *saved_ptid_ptr;
5785 xfree (arg);
5786 }
5787
5788 /* Save the value of inferior_ptid so that it may be restored by a
5789 later call to do_cleanups(). Returns the struct cleanup pointer
5790 needed for later doing the cleanup. */
5791
5792 struct cleanup *
5793 save_inferior_ptid (void)
5794 {
5795 ptid_t *saved_ptid_ptr;
5796
5797 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
5798 *saved_ptid_ptr = inferior_ptid;
5799 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
5800 }
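
/* Hedged usage sketch: temporarily switch inferior_ptid and let the
   cleanup machinery put the old value back even if an error is thrown
   in between.  OTHER_PTID is a stand-in for whatever thread the
   caller wants to operate on.

     struct cleanup *old_chain = save_inferior_ptid ();
     inferior_ptid = other_ptid;
     ... operate on that thread ...
     do_cleanups (old_chain);   -- restores the saved inferior_ptid  */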
5801 \f
5802
5803 /* User interface for reverse debugging:
5804 Set exec-direction / show exec-direction commands
5805 (has an effect only if the target supports reverse execution). */
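
/* For example (meaningful only when the target can run in reverse,
   e.g. while process record is active):

     (gdb) set exec-direction reverse
     (gdb) show exec-direction  */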
5806
5807 enum exec_direction_kind execution_direction = EXEC_FORWARD;
5808 static const char exec_forward[] = "forward";
5809 static const char exec_reverse[] = "reverse";
5810 static const char *exec_direction = exec_forward;
5811 static const char *exec_direction_names[] = {
5812 exec_forward,
5813 exec_reverse,
5814 NULL
5815 };
5816
5817 static void
5818 set_exec_direction_func (char *args, int from_tty,
5819 struct cmd_list_element *cmd)
5820 {
5821 if (target_can_execute_reverse)
5822 {
5823 if (!strcmp (exec_direction, exec_forward))
5824 execution_direction = EXEC_FORWARD;
5825 else if (!strcmp (exec_direction, exec_reverse))
5826 execution_direction = EXEC_REVERSE;
5827 }
5828 }
5829
5830 static void
5831 show_exec_direction_func (struct ui_file *out, int from_tty,
5832 struct cmd_list_element *cmd, const char *value)
5833 {
5834 switch (execution_direction) {
5835 case EXEC_FORWARD:
5836 fprintf_filtered (out, _("Forward.\n"));
5837 break;
5838 case EXEC_REVERSE:
5839 fprintf_filtered (out, _("Reverse.\n"));
5840 break;
5841 case EXEC_ERROR:
5842 default:
5843 fprintf_filtered (out,
5844 _("Forward (target `%s' does not support exec-direction).\n"),
5845 target_shortname);
5846 break;
5847 }
5848 }
5849
5850 /* User interface for non-stop mode. */
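
/* For example (the setting can only be changed while the inferior is
   not executing):

     (gdb) set non-stop on
     (gdb) run  */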
5851
5852 int non_stop = 0;
5853 static int non_stop_1 = 0;
5854
5855 static void
5856 set_non_stop (char *args, int from_tty,
5857 struct cmd_list_element *c)
5858 {
5859 if (target_has_execution)
5860 {
5861 non_stop_1 = non_stop;
5862 error (_("Cannot change this setting while the inferior is running."));
5863 }
5864
5865 non_stop = non_stop_1;
5866 }
5867
5868 static void
5869 show_non_stop (struct ui_file *file, int from_tty,
5870 struct cmd_list_element *c, const char *value)
5871 {
5872 fprintf_filtered (file,
5873 _("Controlling the inferior in non-stop mode is %s.\n"),
5874 value);
5875 }
5876
5877 static void
5878 show_schedule_multiple (struct ui_file *file, int from_tty,
5879 struct cmd_list_element *c, const char *value)
5880 {
5881 fprintf_filtered (file, _("\
5882 Resuming the execution of threads of all processes is %s.\n"), value);
5883 }
5884
5885 void
5886 _initialize_infrun (void)
5887 {
5888 int i;
5889 int numsigs;
5890 struct cmd_list_element *c;
5891
5892 add_info ("signals", signals_info, _("\
5893 What debugger does when program gets various signals.\n\
5894 Specify a signal as argument to print info on that signal only."));
5895 add_info_alias ("handle", "signals", 0);
5896
5897 add_com ("handle", class_run, handle_command, _("\
5898 Specify how to handle a signal.\n\
5899 Args are signals and actions to apply to those signals.\n\
5900 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
5901 from 1-15 are allowed for compatibility with old versions of GDB.\n\
5902 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
5903 The special arg \"all\" is recognized to mean all signals except those\n\
5904 used by the debugger, typically SIGTRAP and SIGINT.\n\
5905 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
5906 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
5907 Stop means reenter debugger if this signal happens (implies print).\n\
5908 Print means print a message if this signal happens.\n\
5909 Pass means let program see this signal; otherwise program doesn't know.\n\
5910 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
5911 Pass and Stop may be combined."));
5912 if (xdb_commands)
5913 {
5914 add_com ("lz", class_info, signals_info, _("\
5915 What debugger does when program gets various signals.\n\
5916 Specify a signal as argument to print info on that signal only."));
5917 add_com ("z", class_run, xdb_handle_command, _("\
5918 Specify how to handle a signal.\n\
5919 Args are signals and actions to apply to those signals.\n\
5920 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
5921 from 1-15 are allowed for compatibility with old versions of GDB.\n\
5922 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
5923 The special arg \"all\" is recognized to mean all signals except those\n\
5924 used by the debugger, typically SIGTRAP and SIGINT.\n\
5925 Recognized actions include \"s\" (toggles between stop and nostop), \n\
5926 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
5927 nopass), \"Q\" (noprint)\n\
5928 Stop means reenter debugger if this signal happens (implies print).\n\
5929 Print means print a message if this signal happens.\n\
5930 Pass means let program see this signal; otherwise program doesn't know.\n\
5931 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
5932 Pass and Stop may be combined."));
5933 }
5934
5935 if (!dbx_commands)
5936 stop_command = add_cmd ("stop", class_obscure,
5937 not_just_help_class_command, _("\
5938 There is no `stop' command, but you can set a hook on `stop'.\n\
5939 This allows you to set a list of commands to be run each time execution\n\
5940 of the program stops."), &cmdlist);
5941
5942 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
5943 Set inferior debugging."), _("\
5944 Show inferior debugging."), _("\
5945 When non-zero, inferior specific debugging is enabled."),
5946 NULL,
5947 show_debug_infrun,
5948 &setdebuglist, &showdebuglist);
5949
5950 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
5951 Set displaced stepping debugging."), _("\
5952 Show displaced stepping debugging."), _("\
5953 When non-zero, displaced stepping specific debugging is enabled."),
5954 NULL,
5955 show_debug_displaced,
5956 &setdebuglist, &showdebuglist);
5957
5958 add_setshow_boolean_cmd ("non-stop", no_class,
5959 &non_stop_1, _("\
5960 Set whether gdb controls the inferior in non-stop mode."), _("\
5961 Show whether gdb controls the inferior in non-stop mode."), _("\
5962 When debugging a multi-threaded program and this setting is\n\
5963 off (the default, also called all-stop mode), when one thread stops\n\
5964 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
5965 all other threads in the program while you interact with the thread of\n\
5966 interest. When you continue or step a thread, you can allow the other\n\
5967 threads to run, or have them remain stopped, but while you inspect any\n\
5968 thread's state, all threads stop.\n\
5969 \n\
5970 In non-stop mode, when one thread stops, other threads can continue\n\
5971 to run freely. You'll be able to step each thread independently,\n\
5972 leave it stopped or free to run as needed."),
5973 set_non_stop,
5974 show_non_stop,
5975 &setlist,
5976 &showlist);
5977
5978 numsigs = (int) TARGET_SIGNAL_LAST;
5979 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
5980 signal_print = (unsigned char *)
5981 xmalloc (sizeof (signal_print[0]) * numsigs);
5982 signal_program = (unsigned char *)
5983 xmalloc (sizeof (signal_program[0]) * numsigs);
5984 for (i = 0; i < numsigs; i++)
5985 {
5986 signal_stop[i] = 1;
5987 signal_print[i] = 1;
5988 signal_program[i] = 1;
5989 }
5990
5991 /* Signals caused by debugger's own actions
5992 should not be given to the program afterwards. */
5993 signal_program[TARGET_SIGNAL_TRAP] = 0;
5994 signal_program[TARGET_SIGNAL_INT] = 0;
5995
5996 /* Signals that are not errors should not normally enter the debugger. */
5997 signal_stop[TARGET_SIGNAL_ALRM] = 0;
5998 signal_print[TARGET_SIGNAL_ALRM] = 0;
5999 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6000 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6001 signal_stop[TARGET_SIGNAL_PROF] = 0;
6002 signal_print[TARGET_SIGNAL_PROF] = 0;
6003 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6004 signal_print[TARGET_SIGNAL_CHLD] = 0;
6005 signal_stop[TARGET_SIGNAL_IO] = 0;
6006 signal_print[TARGET_SIGNAL_IO] = 0;
6007 signal_stop[TARGET_SIGNAL_POLL] = 0;
6008 signal_print[TARGET_SIGNAL_POLL] = 0;
6009 signal_stop[TARGET_SIGNAL_URG] = 0;
6010 signal_print[TARGET_SIGNAL_URG] = 0;
6011 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6012 signal_print[TARGET_SIGNAL_WINCH] = 0;
6013
6014 /* These signals are used internally by user-level thread
6015 implementations. (See signal(5) on Solaris.) Like the above
6016 signals, a healthy program receives and handles them as part of
6017 its normal operation. */
6018 signal_stop[TARGET_SIGNAL_LWP] = 0;
6019 signal_print[TARGET_SIGNAL_LWP] = 0;
6020 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6021 signal_print[TARGET_SIGNAL_WAITING] = 0;
6022 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6023 signal_print[TARGET_SIGNAL_CANCEL] = 0;
6024
6025 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6026 &stop_on_solib_events, _("\
6027 Set stopping for shared library events."), _("\
6028 Show stopping for shared library events."), _("\
6029 If nonzero, gdb will give control to the user when the dynamic linker\n\
6030 notifies gdb of shared library events. The most common event of interest\n\
6031 to the user would be loading/unloading of a new library."),
6032 NULL,
6033 show_stop_on_solib_events,
6034 &setlist, &showlist);
6035
6036 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6037 follow_fork_mode_kind_names,
6038 &follow_fork_mode_string, _("\
6039 Set debugger response to a program call of fork or vfork."), _("\
6040 Show debugger response to a program call of fork or vfork."), _("\
6041 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6042 parent - the original process is debugged after a fork\n\
6043 child - the new process is debugged after a fork\n\
6044 The unfollowed process will continue to run.\n\
6045 By default, the debugger will follow the parent process."),
6046 NULL,
6047 show_follow_fork_mode_string,
6048 &setlist, &showlist);
6049
6050 add_setshow_enum_cmd ("scheduler-locking", class_run,
6051 scheduler_enums, &scheduler_mode, _("\
6052 Set mode for locking scheduler during execution."), _("\
6053 Show mode for locking scheduler during execution."), _("\
6054 off == no locking (threads may preempt at any time)\n\
6055 on == full locking (no thread except the current thread may run)\n\
6056 step == scheduler locked during every single-step operation.\n\
6057 In this mode, no other thread may run during a step command.\n\
6058 Other threads may run while stepping over a function call ('next')."),
6059 set_schedlock_func, /* traps on target vector */
6060 show_scheduler_mode,
6061 &setlist, &showlist);
6062
6063 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6064 Set mode for resuming threads of all processes."), _("\
6065 Show mode for resuming threads of all processes."), _("\
6066 When on, execution commands (such as 'continue' or 'next') resume all\n\
6067 threads of all processes. When off (which is the default), execution\n\
6068 commands only resume the threads of the current process. The set of\n\
6069 threads that are resumed is further refined by the scheduler-locking\n\
6070 mode (see help set scheduler-locking)."),
6071 NULL,
6072 show_schedule_multiple,
6073 &setlist, &showlist);
6074
6075 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6076 Set mode of the step operation."), _("\
6077 Show mode of the step operation."), _("\
6078 When set, doing a step over a function without debug line information\n\
6079 will stop at the first instruction of that function. Otherwise, the\n\
6080 function is skipped and the step command stops at a different source line."),
6081 NULL,
6082 show_step_stop_if_no_debug,
6083 &setlist, &showlist);
6084
6085 add_setshow_enum_cmd ("displaced-stepping", class_run,
6086 can_use_displaced_stepping_enum,
6087 &can_use_displaced_stepping, _("\
6088 Set debugger's willingness to use displaced stepping."), _("\
6089 Show debugger's willingness to use displaced stepping."), _("\
6090 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6091 supported by the target architecture. If off, gdb will not use displaced\n\
6092 stepping to step over breakpoints, even if such is supported by the target\n\
6093 architecture. If auto (which is the default), gdb will use displaced stepping\n\
6094 if the target architecture supports it and non-stop mode is active, but will not\n\
6095 use it in all-stop mode (see help set non-stop)."),
6096 NULL,
6097 show_can_use_displaced_stepping,
6098 &setlist, &showlist);
6099
6100 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6101 &exec_direction, _("Set direction of execution.\n\
6102 Options are 'forward' or 'reverse'."),
6103 _("Show direction of execution (forward/reverse)."),
6104 _("Tells gdb whether to execute forward or backward."),
6105 set_exec_direction_func, show_exec_direction_func,
6106 &setlist, &showlist);
6107
6108 /* ptid initializations */
6109 null_ptid = ptid_build (0, 0, 0);
6110 minus_one_ptid = ptid_build (-1, 0, 0);
6111 inferior_ptid = null_ptid;
6112 target_last_wait_ptid = minus_one_ptid;
6113 displaced_step_ptid = null_ptid;
6114
6115 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6116 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6117 observer_attach_thread_exit (infrun_thread_thread_exit);
6118
6119 /* Explicitly create without lookup, since that tries to create a
6120 value with a void typed value, and when we get here, gdbarch
6121 isn't initialized yet. At this point, we're quite sure there
6122 isn't another convenience variable of the same name. */
6123 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6124 }