1 /* Multi-process/thread control defs for GDB, the GNU debugger.
2 Copyright (C) 1987-2020 Free Software Foundation, Inc.
3 Contributed by Lynx Real-Time Systems, Inc. Los Gatos, CA.
4
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #ifndef GDBTHREAD_H
22 #define GDBTHREAD_H
23
24 struct symtab;
25
26 #include "breakpoint.h"
27 #include "frame.h"
28 #include "ui-out.h"
29 #include "btrace.h"
30 #include "target/waitstatus.h"
31 #include "cli/cli-utils.h"
32 #include "gdbsupport/refcounted-object.h"
33 #include "gdbsupport/common-gdbthread.h"
34 #include "gdbsupport/forward-scope-exit.h"
35 #include "displaced-stepping.h"
36
37 struct inferior;
38
39 /* Frontend view of the thread state. Possible extensions: stepping,
40 finishing, until(ling),...
41
42 NOTE: Since the thread state is not a boolean, most of the time you do
43 not want to check it with negation. If you really want to check if
44 the thread is stopped,
45
46 use (good):
47
48 if (tp->state == THREAD_STOPPED)
49
50 instead of (bad):
51
52 if (tp->state != THREAD_RUNNING)
53
54 The latter is also true for exited threads, which is most likely
55 not what you want.  */
56 enum thread_state
57 {
58 /* From the frontend's perspective, the thread is stopped.  */
59 THREAD_STOPPED,
60
61 /* From the frontend's perspective, the thread is running.  */
62 THREAD_RUNNING,
63
64 /* The thread is listed, but known to have exited. We keep it
65 listed (but not visible) until it's safe to delete it. */
66 THREAD_EXITED,
67 };
68
69 /* STEP_OVER_ALL means step over all subroutine calls.
70 STEP_OVER_UNDEBUGGABLE means step over calls to undebuggable functions.
71 STEP_OVER_NONE means don't step over any subroutine calls. */
72
73 enum step_over_calls_kind
74 {
75 STEP_OVER_NONE,
76 STEP_OVER_ALL,
77 STEP_OVER_UNDEBUGGABLE
78 };
79
80 /* Inferior thread specific part of `struct infcall_control_state'.
81
82 Inferior process counterpart is `struct inferior_control_state'. */
83
84 struct thread_control_state
85 {
86 /* User/external stepping state. */
87
88 /* Step-resume or longjmp-resume breakpoint. */
89 struct breakpoint *step_resume_breakpoint = nullptr;
90
91 /* Exception-resume breakpoint. */
92 struct breakpoint *exception_resume_breakpoint = nullptr;
93
94 /* Breakpoints used for software single stepping. Plural, because
95 it may have multiple locations. E.g., if stepping over a
96 conditional branch instruction we can't decode the condition for,
97 we'll need to put a breakpoint at the branch destination, and
98 another at the instruction after the branch. */
99 struct breakpoint *single_step_breakpoints = nullptr;
100
101 /* Range to single step within.
102
103 If this is nonzero, respond to a single-step signal by continuing
104 to step if the pc is in this range.
105
106 If step_range_start and step_range_end are both 1, it means to
107 step for a single instruction (FIXME: it might clean up
108 wait_for_inferior in a minor way if this were changed to the
109 address of the instruction and that address plus one. But maybe
110 not). */
111 CORE_ADDR step_range_start = 0; /* Inclusive */
112 CORE_ADDR step_range_end = 0; /* Exclusive */
113
114 /* Function the thread was in as of the last time it started stepping.  */
115 struct symbol *step_start_function = nullptr;
116
117 /* If GDB issues a target step request, and this is nonzero, the
118 target should single-step this thread once, and then continue
119 single-stepping it without GDB core involvement as long as the
120 thread stops in the step range above. If this is zero, the
121 target should ignore the step range, and only issue one single
122 step. */
123 int may_range_step = 0;
124
125 /* Stack frame address as of when stepping command was issued.
126 This is how we know when we step into a subroutine call, and how
127 to set the frame for the breakpoint used to step out. */
128 struct frame_id step_frame_id {};
129
130 /* Similarly, the frame ID of the underlying stack frame (skipping
131 any inlined frames). */
132 struct frame_id step_stack_frame_id {};
133
134 /* Nonzero if we are presently stepping over a breakpoint.
135
136 If we hit a breakpoint or watchpoint, and then continue, we need
137 to single step the current thread with breakpoints disabled, to
138 avoid hitting the same breakpoint or watchpoint again. And we
139 should step just a single thread and keep other threads stopped,
140 so that other threads don't miss breakpoints while they are
141 removed.
142
143 So, this variable simultaneously means that we need to single
144 step the current thread, keep other threads stopped, and that
145 breakpoints should be removed while we step.
146
147 This variable is set either:
148 - in proceed, when we resume the inferior on the user's explicit request
149 - in keep_going, if handle_inferior_event decides we need to
150 step over a breakpoint.
151
152 The variable is cleared in normal_stop.  proceed calls
153 wait_for_inferior, which calls handle_inferior_event in a loop,
154 and until wait_for_inferior exits, this variable is changed only
155 by keep_going.  */
156 int trap_expected = 0;
157
158 /* Nonzero if the thread is being proceeded for a "finish" command
159 or a similar situation when the return value should be printed.  */
160 int proceed_to_finish = 0;
161
162 /* Nonzero if the thread is being proceeded for an inferior function
163 call. */
164 int in_infcall = 0;
165
166 enum step_over_calls_kind step_over_calls = STEP_OVER_NONE;
167
168 /* Nonzero if stopped due to a step command. */
169 int stop_step = 0;
170
171 /* Chain containing status of breakpoint(s) the thread stopped
172 at. */
173 bpstat stop_bpstat = nullptr;
174
175 /* Whether the command that started the thread was a stepping
176 command. This is used to decide whether "set scheduler-locking
177 step" behaves like "on" or "off". */
178 int stepping_command = 0;
179 };
180
181 /* Inferior thread specific part of `struct infcall_suspend_state'. */
182
183 struct thread_suspend_state
184 {
185 /* Last signal that the inferior received (why it stopped). When
186 the thread is resumed, this signal is delivered. Note: the
187 target should not check whether the signal is in pass state,
188 because the signal may have been explicitly passed with the
189 "signal" command, which overrides "handle nopass". If the signal
190 should be suppressed, the core will take care of clearing this
191 before the target is resumed. */
192 enum gdb_signal stop_signal = GDB_SIGNAL_0;
193
194 /* The reason the thread last stopped, if we need to track it
195 (breakpoint, watchpoint, etc.) */
196 enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;
197
198 /* The waitstatus for this thread's last event. */
199 struct target_waitstatus waitstatus {};
200 /* If true WAITSTATUS hasn't been handled yet. */
201 int waitstatus_pending_p = 0;
202
203 /* Record the pc of the thread the last time it stopped. (This is
204 not the current thread's PC as that may have changed since the
205 last stop, e.g., "return" command, or "p $pc = 0xf000").
206
207 - If the thread's PC has not changed since the thread last
208 stopped, then proceed skips a breakpoint at the current PC,
209 otherwise we let the thread run into the breakpoint.
210
211 - If the thread has an unprocessed event pending, as indicated by
212 waitstatus_pending_p, this is used in coordination with
213 stop_reason: if the thread's PC has changed since the thread
214 last stopped, a pending breakpoint waitstatus is discarded.
215
216 - If the thread is running, this is set to -1, to avoid leaving
217 it with a stale value, to make it easier to catch bugs. */
218 CORE_ADDR stop_pc = 0;
219 };
220
221 /* Base class for target-specific thread data. */
222 struct private_thread_info
223 {
224 virtual ~private_thread_info () = 0;
225 };
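
/* A minimal illustrative sketch (the names here are hypothetical): a
   target backend usually derives from private_thread_info to stash its
   own per-thread data, e.g.:

     struct frob_thread_info : public private_thread_info
     {
       int pending_signal = 0;
     };

   and hands ownership to GDB via add_thread_with_info (declared
   below); thread_info then stores it in a unique_ptr.  */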
226
227 /* Threads are intrusively refcounted objects. Being the
228 user-selected thread is normally considered an implicit strong
229 reference and is thus not accounted in the refcount, unlike
230 inferior objects. This is necessary, because there's no "current
231 thread" pointer. Instead the current thread is inferred from the
232 inferior_ptid global. However, when GDB needs to remember the
233 selected thread to later restore it, GDB bumps the thread object's
234 refcount, to prevent something deleting the thread object before
235 reverting back (e.g., due to a "kill" command). If the thread
236 meanwhile exits before being re-selected, then the thread object is
237 left listed in the thread list, but marked with state
238 THREAD_EXITED. (See scoped_restore_current_thread and
239 delete_thread). All other thread references are considered weak
240 references. Placing a thread in the thread list is an implicit
241 strong reference, and is thus not accounted for in the thread's
242 refcount. */
243
244 class thread_info : public refcounted_object
245 {
246 public:
247 explicit thread_info (inferior *inf, ptid_t ptid);
248 ~thread_info ();
249
250 bool deletable () const;
251
252 /* Mark this thread as running and notify observers. */
253 void set_running (bool running);
254
255 struct regcache *regcache ();
256 struct gdbarch *arch ();
257
258 ptid_t ptid; /* "Actual process id";
259 In fact, this may be overloaded with
260 kernel thread id, etc. */
261
262 /* Each thread has two GDB IDs.
263
264 a) The thread ID (Id). This consists of the pair of:
265
266 - the number of the thread's inferior and,
267
268 - the thread's thread number in its inferior, aka, the
269 per-inferior thread number. This number is unique in the
270 inferior but not unique between inferiors.
271
272 b) The global ID (GId).  This is a single integer unique
273 across all inferiors.
274
275 E.g.:
276
277 (gdb) info threads -gid
278 Id GId Target Id Frame
279 * 1.1 1 Thread A 0x16a09237 in foo () at foo.c:10
280 1.2 3 Thread B 0x15ebc6ed in bar () at foo.c:20
281 1.3 5 Thread C 0x15ebc6ed in bar () at foo.c:20
282 2.1 2 Thread A 0x16a09237 in foo () at foo.c:10
283 2.2 4 Thread B 0x15ebc6ed in bar () at foo.c:20
284 2.3 6 Thread C 0x15ebc6ed in bar () at foo.c:20
285
286 Above, both inferiors 1 and 2 have threads numbered 1-3, but each
287 thread has its own unique global ID. */
288
289 /* The thread's global GDB thread number. This is exposed to MI,
290 Python/Scheme, visible with "info threads -gid", and is also what
291 the $_gthread convenience variable is bound to. */
292 int global_num;
293
294 /* The per-inferior thread number. This is unique in the inferior
295 the thread belongs to, but not unique between inferiors. This is
296 what the $_thread convenience variable is bound to. */
297 int per_inf_num;
298
299 /* The inferior this thread belongs to. */
300 struct inferior *inf;
301
302 /* The name of the thread, as specified by the user. This is NULL
303 if the thread does not have a user-given name. */
304 char *name = NULL;
305
306 /* Non-zero means the thread is executing. Note: this is different
307 from saying that there is an active target and we are stopped at
308 a breakpoint, for instance.  This is a real indicator of whether
309 the thread is off and running.  */
310 int executing = 0;
311
312 /* Non-zero if this thread is resumed from infrun's perspective.
313 Note that a thread can be marked both as not-executing and
314 resumed at the same time. This happens if we try to resume a
315 thread that has a wait status pending. We shouldn't let the
316 thread really run until that wait status has been processed, but
317 we should not process that wait status if we didn't try to let
318 the thread run. */
319 int resumed = 0;
320
321 /* Frontend view of the thread state. Note that the THREAD_RUNNING/
322 THREAD_STOPPED states are different from EXECUTING. When the
323 thread is stopped internally while handling an internal event,
324 like a software single-step breakpoint, EXECUTING will be false,
325 but STATE will still be THREAD_RUNNING. */
326 enum thread_state state = THREAD_STOPPED;
327
328 /* State of GDB control of inferior thread execution.
329 See `struct thread_control_state'. */
330 thread_control_state control;
331
332 /* State of inferior thread to restore after GDB is done with an inferior
333 call. See `struct thread_suspend_state'. */
334 thread_suspend_state suspend;
335
336 int current_line = 0;
337 struct symtab *current_symtab = NULL;
338
339 /* Internal stepping state. */
340
341 /* Record the pc of the thread the last time it was resumed. (It
342 can't be done on stop as the PC may change since the last stop,
343 e.g., "return" command, or "p $pc = 0xf000"). This is maintained
344 by proceed and keep_going, and among other things, it's used in
345 adjust_pc_after_break to distinguish a hardware single-step
346 SIGTRAP from a breakpoint SIGTRAP. */
347 CORE_ADDR prev_pc = 0;
348
349 /* Did we set the thread stepping a breakpoint instruction? This is
350 used in conjunction with PREV_PC to decide whether to adjust the
351 PC. */
352 int stepped_breakpoint = 0;
353
354 /* Should we step over breakpoint next time keep_going is called? */
355 int stepping_over_breakpoint = 0;
356
357 /* Should we step over a watchpoint next time keep_going is called?
358 This is needed on targets with non-continuable, non-steppable
359 watchpoints. */
360 int stepping_over_watchpoint = 0;
361
362 /* Set to TRUE if we should finish single-stepping over a breakpoint
363 after hitting the current step-resume breakpoint.  The context here
364 is that GDB is doing a `next' or `step' when a signal arrives.
365 When stepping over a breakpoint and a signal arrives, GDB will attempt
366 to skip the signal handler, so it inserts a step_resume_breakpoint at
367 the signal return address, and resumes the inferior.
368 step_after_step_resume_breakpoint is set to TRUE at this moment to
369 remind GDB that there is still a breakpoint to step over when GDB
370 gets back a SIGTRAP from the step_resume_breakpoint.  */
371 int step_after_step_resume_breakpoint = 0;
372
373 /* Pointer to the state machine manager object that handles what is
374 left to do for the thread's execution command after the target
375 stops. Several execution commands use it. */
376 struct thread_fsm *thread_fsm = NULL;
377
378 /* This is used to remember when a fork or vfork event was caught by
379 a catchpoint, and thus the event is to be followed at the next
380 resume of the thread, and not immediately. */
381 struct target_waitstatus pending_follow;
382
383 /* True if this thread has been explicitly requested to stop. */
384 int stop_requested = 0;
385
386 /* The initiating frame of a nexting operation, used for deciding
387 which exceptions to intercept.  If it is null_frame_id, no
388 bp_longjmp or bp_exception breakpoints are used; longjmp is
389 caught only for bp_longjmp_call_dummy.  */
390 struct frame_id initiating_frame = null_frame_id;
391
392 /* Private data used by the target vector implementation. */
393 std::unique_ptr<private_thread_info> priv;
394
395 /* Branch trace information for this thread. */
396 struct btrace_thread_info btrace {};
397
398 /* Flag which indicates that the stack temporaries should be stored while
399 evaluating expressions. */
400 bool stack_temporaries_enabled = false;
401
402 /* Values that are stored as temporaries on stack while evaluating
403 expressions. */
404 std::vector<struct value *> stack_temporaries;
405
406 /* Step-over chain. A thread is in the step-over queue if these are
407 non-NULL. If only a single thread is in the chain, then these
408 fields point to self. */
409 struct thread_info *step_over_prev = NULL;
410 struct thread_info *step_over_next = NULL;
411
412 displaced_step_thread_state displaced_step_state;
413 };
414
415 /* A gdb::ref_ptr pointer to a thread_info. */
416
417 using thread_info_ref
418 = gdb::ref_ptr<struct thread_info, refcounted_object_ref_policy>;
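
/* A brief usage sketch, assuming gdb::ref_ptr's new_reference helper:
   holding a strong reference keeps the thread object from being
   deleted (it may still transition to THREAD_EXITED) until the
   reference is dropped:

     thread_info_ref ref = thread_info_ref::new_reference (thr);  */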
419
420 /* Create an empty thread list, or empty the existing one. */
421 extern void init_thread_list (void);
422
423 /* Add a thread to the thread list, print a message
424 that a new thread is found, and return the pointer to
425 the new thread.  Caller may use this pointer to
426 initialize the private thread data. */
427 extern struct thread_info *add_thread (ptid_t ptid);
428
429 /* Same as add_thread, but does not print a message
430 about new thread. */
431 extern struct thread_info *add_thread_silent (ptid_t ptid);
432
433 /* Same as add_thread, and sets the private info. */
434 extern struct thread_info *add_thread_with_info (ptid_t ptid,
435 struct private_thread_info *);
436
437 /* Delete an existing thread, removing the entry from its inferior's thread
438 map. */
439 extern void delete_thread (struct thread_info *thread);
440
441 /* Like the above, but don't remove the entry from the inferior thread map. */
442 extern void delete_thread_noremove (struct thread_info *thread);
443
444 /* Delete an existing thread list entry, and be quiet about it. Used
445 after the process this thread belonged to has already
446 exited, for example.  */
447 extern void delete_thread_silent (struct thread_info *thread);
448
449 /* Like the above, but don't remove the entry from the inferior thread map. */
450 extern void delete_thread_silent_noremove (thread_info *thread);
451
452 /* Delete a step_resume_breakpoint from the thread database. */
453 extern void delete_step_resume_breakpoint (struct thread_info *);
454
455 /* Delete an exception_resume_breakpoint from the thread database. */
456 extern void delete_exception_resume_breakpoint (struct thread_info *);
457
458 /* Delete the single-step breakpoints of thread TP, if any. */
459 extern void delete_single_step_breakpoints (struct thread_info *tp);
460
461 /* Check if the thread has software single stepping breakpoints
462 set. */
463 extern int thread_has_single_step_breakpoints_set (struct thread_info *tp);
464
465 /* Check whether the thread has software single stepping breakpoints
466 set at PC. */
467 extern int thread_has_single_step_breakpoint_here (struct thread_info *tp,
468 const address_space *aspace,
469 CORE_ADDR addr);
470
471 /* Returns whether to show inferior-qualified thread IDs, or plain
472 thread numbers. Inferior-qualified IDs are shown whenever we have
473 multiple inferiors, or the only inferior left has number > 1. */
474 extern int show_inferior_qualified_tids (void);
475
476 /* Return a string version of THR's thread ID. If there are multiple
477 inferiors, then this prints the inferior-qualifier form, otherwise
478 it only prints the thread number. The result is stored in a
479 circular static buffer, NUMCELLS deep. */
480 const char *print_thread_id (struct thread_info *thr);
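
/* For example (illustrative only), a caller might report:

     error (_("Thread %s has exited"), print_thread_id (tp));  */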
481
482 /* Boolean test for an already-known ptid. */
483 extern int in_thread_list (ptid_t ptid);
484
485 /* Boolean test for an already-known global thread id (GDB's homegrown
486 global id, not the system's). */
487 extern int valid_global_thread_id (int global_id);
488
489 /* Search function to lookup a thread by 'ptid'.  */
490 extern struct thread_info *find_thread_ptid (ptid_t ptid);
491
492 /* Search function to lookup a thread by 'ptid'. Only searches in
493 threads of INF. */
494 extern struct thread_info *find_thread_ptid (inferior *inf, ptid_t ptid);
495
496 /* Find thread by GDB global thread ID. */
497 struct thread_info *find_thread_global_id (int global_id);
498
499 /* Find thread by thread library specific handle in inferior INF. */
500 struct thread_info *find_thread_by_handle
501 (gdb::array_view<const gdb_byte> handle, struct inferior *inf);
502
503 /* Finds the first thread of the specified inferior. */
504 extern struct thread_info *first_thread_of_inferior (inferior *inf);
505
506 /* Returns any thread of inferior INF, giving preference to the
507 current thread. */
508 extern struct thread_info *any_thread_of_inferior (inferior *inf);
509
510 /* Returns any non-exited thread of inferior INF, giving preference to
511 the current thread, and to not executing threads. */
512 extern struct thread_info *any_live_thread_of_inferior (inferior *inf);
513
514 /* Change the ptid of thread OLD_PTID to NEW_PTID. */
515 void thread_change_ptid (ptid_t old_ptid, ptid_t new_ptid);
516
517 /* Iterator function to call a user-provided callback function
518 once for each known thread. */
519 typedef int (*thread_callback_func) (struct thread_info *, void *);
520 extern struct thread_info *iterate_over_threads (thread_callback_func, void *);
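
/* A minimal sketch of the callback protocol, assuming the usual
   convention that iteration stops at the first thread for which the
   callback returns nonzero and that thread is returned:

     static int
     thread_is_stopped_cb (struct thread_info *tp, void *data)
     {
       return tp->state == THREAD_STOPPED;
     }

     thread_info *stopped
       = iterate_over_threads (thread_is_stopped_cb, nullptr);  */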
521
522 /* Pull in the internals of the inferiors/threads ranges and
523 iterators. Must be done after struct thread_info is defined. */
524 #include "thread-iter.h"
525
526 /* Return a range that can be used to walk over all threads of all
527 inferiors, with range-for. Used like this:
528
529 for (thread_info *thr : all_threads ())
530 { .... }
531 */
532 inline all_threads_range
533 all_threads ()
534 {
535 return {};
536 }
537
538 /* Likewise, but accept a filter PTID. */
539
540 inline all_matching_threads_range
541 all_threads (ptid_t filter_ptid)
542 {
543 return all_matching_threads_range (filter_ptid);
544 }
545
546 /* Return a range that can be used to walk over all non-exited threads
547 of all inferiors, with range-for. FILTER_PTID can be used to
548 filter out threads that don't match.  */
549
550 inline all_non_exited_threads_range
551 all_non_exited_threads (ptid_t filter_ptid = minus_one_ptid)
552 {
553 return all_non_exited_threads_range (filter_ptid);
554 }
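
/* For example, to walk only the non-exited threads of one process
   (PID is illustrative):

     for (thread_info *tp : all_non_exited_threads (ptid_t (pid)))
       { .... }
*/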
555
556 /* Return a range that can be used to walk over all threads of all
557 inferiors, with range-for, safely. I.e., it is safe to delete the
558 currently-iterated thread. When combined with range-for, this
559 allows convenient patterns like this:
560
561 for (thread_info *t : all_threads_safe ())
562 if (some_condition ())
563 delete t;
564 */
565
566 inline all_threads_safe_range
567 all_threads_safe ()
568 {
569 return all_threads_safe_range ();
570 }
571
572 extern int thread_count (void);
573
574 /* Return true if we have any thread in any inferior. */
575 extern bool any_thread_p ();
576
577 /* Switch context to thread THR. Also sets the STOP_PC global. */
578 extern void switch_to_thread (struct thread_info *thr);
579
580 /* Switch context to no thread selected. */
581 extern void switch_to_no_thread ();
582
583 /* Switch from one thread to another. Does not read registers. */
584 extern void switch_to_thread_no_regs (struct thread_info *thread);
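
/* A small illustrative sketch: pick some live thread of inferior INF
   and make it the current thread:

     thread_info *tp = any_live_thread_of_inferior (inf);
     if (tp != nullptr)
       switch_to_thread (tp);  */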
585
586 /* Marks or clears thread(s) PTID as resumed. If PTID is
587 MINUS_ONE_PTID, applies to all threads. If ptid_is_pid(PTID) is
588 true, applies to all threads of the process pointed at by PTID. */
589 extern void set_resumed (ptid_t ptid, int resumed);
590
591 /* Marks thread PTID as running, or stopped.
592 If PTID is minus_one_ptid, marks all threads. */
593 extern void set_running (ptid_t ptid, int running);
594
595 /* Marks or clears thread(s) PTID as having been requested to stop.
596 If PTID is MINUS_ONE_PTID, applies to all threads. If
597 ptid_is_pid(PTID) is true, applies to all threads of the process
598 pointed at by PTID. If STOP, then the THREAD_STOP_REQUESTED
599 observer is called with PTID as argument. */
600 extern void set_stop_requested (ptid_t ptid, int stop);
601
602 /* Marks thread PTID as executing, or not. If PTID is minus_one_ptid,
603 marks all threads.
604
605 Note that this is different from the running state. See the
606 description of state and executing fields of struct
607 thread_info. */
608 extern void set_executing (ptid_t ptid, int executing);
609
610 /* True if any (known or unknown) thread is or may be executing. */
611 extern int threads_are_executing (void);
612
613 /* Merge the executing property of thread PTID over to its thread
614 state property (frontend running/stopped view).
615
616 "not executing" -> "stopped"
617 "executing" -> "running"
618 "exited" -> "exited"
619
620 If PTID is minus_one_ptid, go over all threads.
621
622 Notifications are only emitted if the thread state did change. */
623 extern void finish_thread_state (ptid_t ptid);
624
625 /* Calls finish_thread_state on scope exit, unless release() is called
626 to disengage. */
627 using scoped_finish_thread_state
628 = FORWARD_SCOPE_EXIT (finish_thread_state);
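
/* A typical usage sketch: arrange for finish_thread_state to run even
   if an exception is thrown, and disengage on the successful path:

     scoped_finish_thread_state finish_state (minus_one_ptid);
     ... resume the target and handle the event ...
     finish_state.release ();  */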
629
630 /* Commands with a prefix of `thread'. */
631 extern struct cmd_list_element *thread_cmd_list;
632
633 extern void thread_command (const char *tidstr, int from_tty);
634
635 /* Print notices on thread events (attach, detach, etc.), set with
636 `set print thread-events'. */
637 extern bool print_thread_events;
638
639 /* Prints the list of threads and their details on UIOUT. If
640 REQUESTED_THREADS, a list of GDB ids/ranges, is not NULL, only
641 print threads whose ID is included in the list. If PID is not -1,
642 only print threads from the process PID. Otherwise, threads from
643 all attached PIDs are printed. If both REQUESTED_THREADS is not
644 NULL and PID is not -1, then the thread is printed if it belongs to
645 the specified process. Otherwise, an error is raised. */
646 extern void print_thread_info (struct ui_out *uiout,
647 const char *requested_threads,
648 int pid);
649
650 /* Save/restore current inferior/thread/frame. */
651
652 class scoped_restore_current_thread
653 {
654 public:
655 scoped_restore_current_thread ();
656 ~scoped_restore_current_thread ();
657
658 DISABLE_COPY_AND_ASSIGN (scoped_restore_current_thread);
659
660 private:
661 /* Use the "class" keyword here, because of a clash with a "thread_info"
662 function in the Darwin API. */
663 class thread_info *m_thread;
664 inferior *m_inf;
665 frame_id m_selected_frame_id;
666 int m_selected_frame_level;
667 bool m_was_stopped;
668 };
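
/* Typical usage sketch (OTHER_THREAD is illustrative): temporarily
   switch threads and let the destructor restore the previously
   selected thread, inferior and frame:

     {
       scoped_restore_current_thread restore_thread;
       switch_to_thread (other_thread);
       ... operate on OTHER_THREAD ...
     }
*/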
669
670 /* Returns a pointer to the thread_info corresponding to
671 INFERIOR_PTID. INFERIOR_PTID *must* be in the thread list. */
672 extern struct thread_info* inferior_thread (void);
673
674 extern void update_thread_list (void);
675
676 /* Delete any thread the target says is no longer alive. */
677
678 extern void prune_threads (void);
679
680 /* Delete threads marked THREAD_EXITED. Unlike prune_threads, this
681 does not consult the target about whether the thread is alive right
682 now. */
683 extern void delete_exited_threads (void);
684
685 /* Return true if PC is in the stepping range of THREAD. */
686
687 int pc_in_thread_step_range (CORE_ADDR pc, struct thread_info *thread);
688
689 /* Enable storing stack temporaries for thread THR and disable and
690 clear the stack temporaries on destruction. Holds a strong
691 reference to THR. */
692
693 class enable_thread_stack_temporaries
694 {
695 public:
696
697 explicit enable_thread_stack_temporaries (struct thread_info *thr)
698 : m_thr (thr)
699 {
700 gdb_assert (m_thr != NULL);
701
702 m_thr->incref ();
703
704 m_thr->stack_temporaries_enabled = true;
705 m_thr->stack_temporaries.clear ();
706 }
707
708 ~enable_thread_stack_temporaries ()
709 {
710 m_thr->stack_temporaries_enabled = false;
711 m_thr->stack_temporaries.clear ();
712
713 m_thr->decref ();
714 }
715
716 DISABLE_COPY_AND_ASSIGN (enable_thread_stack_temporaries);
717
718 private:
719
720 struct thread_info *m_thr;
721 };
722
723 extern bool thread_stack_temporaries_enabled_p (struct thread_info *tp);
724
725 extern void push_thread_stack_temporary (struct thread_info *tp, struct value *v);
726
727 extern value *get_last_thread_stack_temporary (struct thread_info *tp);
728
729 extern bool value_in_thread_stack_temporaries (struct value *,
730 struct thread_info *thr);
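
/* An illustrative sketch of how these pieces fit together while
   evaluating an expression (TP and V are obtained elsewhere):

     enable_thread_stack_temporaries enable (tp);
     push_thread_stack_temporary (tp, v);
     value *last = get_last_thread_stack_temporary (tp);
     ... the temporaries are cleared when ENABLE goes out of scope.  */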
731
732 /* Add TP to the end of the global pending step-over chain. */
733
734 extern void global_thread_step_over_chain_enqueue (struct thread_info *tp);
735
736 /* Remove TP from step-over chain LIST_P. */
737
738 extern void thread_step_over_chain_remove (thread_info **list_p,
739 thread_info *tp);
740
741 /* Remove TP from the global pending step-over chain. */
742
743 extern void global_thread_step_over_chain_remove (thread_info *tp);
744
745 /* Return the next thread in the step-over chain whose head is CHAIN_HEAD.
746 Return NULL if TP is the last entry in the chain. */
747
748 extern thread_info *thread_step_over_chain_next (thread_info *chain_head,
749 thread_info *tp);
750
751 /* Return the next thread in the global step-over chain. Return NULL if TP is
752 the last entry in the chain. */
753
754 extern thread_info *global_thread_step_over_chain_next (thread_info *tp);
755
756 /* Return true if TP is in any step-over chain. */
757
758 extern int thread_is_in_step_over_chain (struct thread_info *tp);
759
760 /* Return the length of the step-over chain TP is in.  */
761
762 extern int thread_step_over_chain_length (thread_info *tp);
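
/* A minimal sketch of queueing a thread for a step-over and later
   removing it again (THR is illustrative):

     if (!thread_is_in_step_over_chain (thr))
       global_thread_step_over_chain_enqueue (thr);
     ...
     global_thread_step_over_chain_remove (thr);  */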
763
764 /* Cancel any ongoing execution command. */
765
766 extern void thread_cancel_execution_command (struct thread_info *thr);
767
768 /* Check whether it makes sense to access a register of the current
769 thread at this point. If not, throw an error (e.g., the thread is
770 executing). */
771 extern void validate_registers_access (void);
772
773 /* Check whether it makes sense to access a register of THREAD at this point.
774 Returns true if registers may be accessed; false otherwise. */
775 extern bool can_access_registers_thread (struct thread_info *thread);
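
/* E.g., a sketch of guarding register reads on a possibly-executing
   thread:

     if (can_access_registers_thread (tp))
       {
         regcache *regs = tp->regcache ();
         ... read registers via REGS ...
       }
*/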
776
777 /* Returns whether to show which thread hit the breakpoint, received a
778 signal, etc. and ended up causing a user-visible stop. This is
779 true iff we ever detected multiple threads. */
780 extern int show_thread_that_caused_stop (void);
781
782 /* Print the message for a thread or/and frame selected. */
783 extern void print_selected_thread_frame (struct ui_out *uiout,
784 user_selected_what selection);
785
786 /* Helper for the CLI's "thread" command and for MI's -thread-select.
787 Selects thread THR. TIDSTR is the original string the thread ID
788 was parsed from. This is used in the error message if THR is not
789 alive anymore. */
790 extern void thread_select (const char *tidstr, class thread_info *thr);
791
792 #endif /* GDBTHREAD_H */