Change inferior thread list to be a thread map
gdb/gdbthread.h (binutils-gdb.git)
1 /* Multi-process/thread control defs for GDB, the GNU debugger.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 Contributed by Lynx Real-Time Systems, Inc. Los Gatos, CA.
4
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #ifndef GDBTHREAD_H
22 #define GDBTHREAD_H
23
24 struct symtab;
25
26 #include "breakpoint.h"
27 #include "frame.h"
28 #include "ui-out.h"
29 #include "btrace.h"
30 #include "target/waitstatus.h"
31 #include "cli/cli-utils.h"
32 #include "gdbsupport/refcounted-object.h"
33 #include "gdbsupport/common-gdbthread.h"
34 #include "gdbsupport/forward-scope-exit.h"
35
36 struct inferior;
37
38 /* Frontend view of the thread state. Possible extensions: stepping,
39 finishing, until(ling),...
40
41 NOTE: Since the thread state is not a boolean, you usually do not
42 want to check it with negation. If you really want to check whether
43 the thread is stopped,
44
45 use (good):
46
47 if (tp->state == THREAD_STOPPED)
48
49 instead of (bad):
50
51 if (tp->state != THREAD_RUNNING)
52
53 The latter is also true for exited threads, which is most likely
54 not what you want. */
55 enum thread_state
56 {
57 /* In the frontend's perspective, the thread is stopped. */
58 THREAD_STOPPED,
59
60 /* In the frontend's perspective, the thread is running. */
61 THREAD_RUNNING,
62
63 /* The thread is listed, but known to have exited. We keep it
64 listed (but not visible) until it's safe to delete it. */
65 THREAD_EXITED,
66 };
67
68 /* STEP_OVER_ALL means step over all subroutine calls.
69 STEP_OVER_UNDEBUGGABLE means step over calls to undebuggable functions.
70 STEP_OVER_NONE means don't step over any subroutine calls. */
71
72 enum step_over_calls_kind
73 {
74 STEP_OVER_NONE,
75 STEP_OVER_ALL,
76 STEP_OVER_UNDEBUGGABLE
77 };
78
79 /* Inferior thread specific part of `struct infcall_control_state'.
80
81 Inferior process counterpart is `struct inferior_control_state'. */
82
83 struct thread_control_state
84 {
85 /* User/external stepping state. */
86
87 /* Step-resume or longjmp-resume breakpoint. */
88 struct breakpoint *step_resume_breakpoint = nullptr;
89
90 /* Exception-resume breakpoint. */
91 struct breakpoint *exception_resume_breakpoint = nullptr;
92
93 /* Breakpoints used for software single stepping. Plural, because
94 it may have multiple locations. E.g., if stepping over a
95 conditional branch instruction we can't decode the condition for,
96 we'll need to put a breakpoint at the branch destination, and
97 another at the instruction after the branch. */
98 struct breakpoint *single_step_breakpoints = nullptr;
99
100 /* Range to single step within.
101
102 If this is nonzero, respond to a single-step signal by continuing
103 to step if the pc is in this range.
104
105 If step_range_start and step_range_end are both 1, it means to
106 step for a single instruction (FIXME: it might clean up
107 wait_for_inferior in a minor way if this were changed to the
108 address of the instruction and that address plus one. But maybe
109 not). */
110 CORE_ADDR step_range_start = 0; /* Inclusive */
111 CORE_ADDR step_range_end = 0; /* Exclusive */
112
113 /* Function the thread was in as of the last time it started stepping. */
114 struct symbol *step_start_function = nullptr;
115
116 /* If GDB issues a target step request, and this is nonzero, the
117 target should single-step this thread once, and then continue
118 single-stepping it without GDB core involvement as long as the
119 thread stops in the step range above. If this is zero, the
120 target should ignore the step range, and only issue one single
121 step. */
122 int may_range_step = 0;
123
124 /* Stack frame address as of when stepping command was issued.
125 This is how we know when we step into a subroutine call, and how
126 to set the frame for the breakpoint used to step out. */
127 struct frame_id step_frame_id {};
128
129 /* Similarly, the frame ID of the underlying stack frame (skipping
130 any inlined frames). */
131 struct frame_id step_stack_frame_id {};
132
133 /* Nonzero if we are presently stepping over a breakpoint.
134
135 If we hit a breakpoint or watchpoint, and then continue, we need
136 to single step the current thread with breakpoints disabled, to
137 avoid hitting the same breakpoint or watchpoint again. And we
138 should step just a single thread and keep other threads stopped,
139 so that other threads don't miss breakpoints while they are
140 removed.
141
142 So, this variable simultaneously means that we need to single
143 step the current thread, keep other threads stopped, and that
144 breakpoints should be removed while we step.
145
146 This variable is set either:
147 - in proceed, when we resume the inferior on the user's explicit request
148 - in keep_going, if handle_inferior_event decides we need to
149 step over a breakpoint.
150
151 The variable is cleared in normal_stop. The proceed calls
152 wait_for_inferior, which calls handle_inferior_event in a loop,
153 and until wait_for_inferior exits, this variable is changed only
154 by keep_going. */
155 int trap_expected = 0;
156
157 /* Nonzero if the thread is being proceeded for a "finish" command
158 or a similar situation when the return value should be printed. */
159 int proceed_to_finish = 0;
160
161 /* Nonzero if the thread is being proceeded for an inferior function
162 call. */
163 int in_infcall = 0;
164
165 enum step_over_calls_kind step_over_calls = STEP_OVER_NONE;
166
167 /* Nonzero if stopped due to a step command. */
168 int stop_step = 0;
169
170 /* Chain containing status of breakpoint(s) the thread stopped
171 at. */
172 bpstat stop_bpstat = nullptr;
173
174 /* Whether the command that started the thread was a stepping
175 command. This is used to decide whether "set scheduler-locking
176 step" behaves like "on" or "off". */
177 int stepping_command = 0;
178 };
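/* For illustration, a minimal sketch of how the step-range fields above
   are typically consulted; the surrounding logic and the STOP_PC
   variable are hypothetical, and pc_in_thread_step_range is declared
   further below in this header:

     if (tp->control.may_range_step
         && pc_in_thread_step_range (stop_pc, tp))
       {
         // Still inside the range being stepped over; let the target
         // keep range-stepping instead of reporting a stop.
       }
*/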
179
180 /* Inferior thread specific part of `struct infcall_suspend_state'. */
181
182 struct thread_suspend_state
183 {
184 /* Last signal that the inferior received (why it stopped). When
185 the thread is resumed, this signal is delivered. Note: the
186 target should not check whether the signal is in pass state,
187 because the signal may have been explicitly passed with the
188 "signal" command, which overrides "handle nopass". If the signal
189 should be suppressed, the core will take care of clearing this
190 before the target is resumed. */
191 enum gdb_signal stop_signal = GDB_SIGNAL_0;
192
193 /* The reason the thread last stopped, if we need to track it
194 (breakpoint, watchpoint, etc.) */
195 enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;
196
197 /* The waitstatus for this thread's last event. */
198 struct target_waitstatus waitstatus {};
199 /* If true, WAITSTATUS hasn't been handled yet. */
200 int waitstatus_pending_p = 0;
201
202 /* Record the pc of the thread the last time it stopped. (This is
203 not the current thread's PC as that may have changed since the
204 last stop, e.g., "return" command, or "p $pc = 0xf000").
205
206 - If the thread's PC has not changed since the thread last
207 stopped, then proceed skips a breakpoint at the current PC,
208 otherwise we let the thread run into the breakpoint.
209
210 - If the thread has an unprocessed event pending, as indicated by
211 waitstatus_pending_p, this is used in coordination with
212 stop_reason: if the thread's PC has changed since the thread
213 last stopped, a pending breakpoint waitstatus is discarded.
214
215 - If the thread is running, this is set to -1, to avoid leaving
216 it with a stale value, to make it easier to catch bugs. */
217 CORE_ADDR stop_pc = 0;
218 };
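/* A minimal usage sketch of the pending-event fields above (TP is a
   hypothetical thread_info pointer):

     if (tp->suspend.waitstatus_pending_p)
       {
         // An event was already reported for this thread but has not
         // been processed yet; consume tp->suspend.waitstatus instead
         // of resuming the thread and waiting again.
       }
*/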
219
220 /* Base class for target-specific thread data. */
221 struct private_thread_info
222 {
223 virtual ~private_thread_info () = 0;
224 };
225
226 /* Threads are intrusively refcounted objects. Being the
227 user-selected thread is normally considered an implicit strong
228 reference and is thus not accounted in the refcount, unlike
229 inferior objects. This is necessary, because there's no "current
230 thread" pointer. Instead the current thread is inferred from the
231 inferior_ptid global. However, when GDB needs to remember the
232 selected thread to later restore it, GDB bumps the thread object's
233 refcount, to prevent something deleting the thread object before
234 reverting back (e.g., due to a "kill" command). If the thread
235 meanwhile exits before being re-selected, then the thread object is
236 left listed in the thread list, but marked with state
237 THREAD_EXITED. (See scoped_restore_current_thread and
238 delete_thread). All other thread references are considered weak
239 references. Placing a thread in the thread list is an implicit
240 strong reference, and is thus not accounted for in the thread's
241 refcount. */
242
243 class thread_info : public refcounted_object
244 {
245 public:
246 explicit thread_info (inferior *inf, ptid_t ptid);
247 ~thread_info ();
248
249 bool deletable () const;
250
251 /* Mark this thread as running and notify observers. */
252 void set_running (bool running);
253
254 ptid_t ptid; /* "Actual process id";
255 In fact, this may be overloaded with
256 kernel thread id, etc. */
257
258 /* Each thread has two GDB IDs.
259
260 a) The thread ID (Id). This consists of the pair of:
261
262 - the number of the thread's inferior and,
263
264 - the thread's thread number in its inferior, aka, the
265 per-inferior thread number. This number is unique in the
266 inferior but not unique between inferiors.
267
268 b) The global ID (GId). This is a single integer unique
269 across all inferiors.
270
271 E.g.:
272
273 (gdb) info threads -gid
274 Id GId Target Id Frame
275 * 1.1 1 Thread A 0x16a09237 in foo () at foo.c:10
276 1.2 3 Thread B 0x15ebc6ed in bar () at foo.c:20
277 1.3 5 Thread C 0x15ebc6ed in bar () at foo.c:20
278 2.1 2 Thread A 0x16a09237 in foo () at foo.c:10
279 2.2 4 Thread B 0x15ebc6ed in bar () at foo.c:20
280 2.3 6 Thread C 0x15ebc6ed in bar () at foo.c:20
281
282 Above, both inferiors 1 and 2 have threads numbered 1-3, but each
283 thread has its own unique global ID. */
284
285 /* The thread's global GDB thread number. This is exposed to MI,
286 Python/Scheme, visible with "info threads -gid", and is also what
287 the $_gthread convenience variable is bound to. */
288 int global_num;
289
290 /* The per-inferior thread number. This is unique in the inferior
291 the thread belongs to, but not unique between inferiors. This is
292 what the $_thread convenience variable is bound to. */
293 int per_inf_num;
294
295 /* The inferior this thread belongs to. */
296 struct inferior *inf;
297
298 /* The name of the thread, as specified by the user. This is NULL
299 if the thread does not have a user-given name. */
300 char *name = NULL;
301
302 /* Non-zero means the thread is executing. Note: this is different
303 from saying that there is an active target and we are stopped at
304 a breakpoint, for instance. This is a real indicator of whether
305 the thread is off and running. */
306 int executing = 0;
307
308 /* Non-zero if this thread is resumed from infrun's perspective.
309 Note that a thread can be marked both as not-executing and
310 resumed at the same time. This happens if we try to resume a
311 thread that has a wait status pending. We shouldn't let the
312 thread really run until that wait status has been processed, but
313 we should not process that wait status if we didn't try to let
314 the thread run. */
315 int resumed = 0;
316
317 /* Frontend view of the thread state. Note that the THREAD_RUNNING/
318 THREAD_STOPPED states are different from EXECUTING. When the
319 thread is stopped internally while handling an internal event,
320 like a software single-step breakpoint, EXECUTING will be false,
321 but STATE will still be THREAD_RUNNING. */
322 enum thread_state state = THREAD_STOPPED;
323
324 /* State of GDB control of inferior thread execution.
325 See `struct thread_control_state'. */
326 thread_control_state control;
327
328 /* State of inferior thread to restore after GDB is done with an inferior
329 call. See `struct thread_suspend_state'. */
330 thread_suspend_state suspend;
331
332 int current_line = 0;
333 struct symtab *current_symtab = NULL;
334
335 /* Internal stepping state. */
336
337 /* Record the pc of the thread the last time it was resumed. (It
338 can't be done on stop as the PC may change since the last stop,
339 e.g., "return" command, or "p $pc = 0xf000"). This is maintained
340 by proceed and keep_going, and among other things, it's used in
341 adjust_pc_after_break to distinguish a hardware single-step
342 SIGTRAP from a breakpoint SIGTRAP. */
343 CORE_ADDR prev_pc = 0;
344
345 /* Did we set the thread stepping a breakpoint instruction? This is
346 used in conjunction with PREV_PC to decide whether to adjust the
347 PC. */
348 int stepped_breakpoint = 0;
349
350 /* Should we step over breakpoint next time keep_going is called? */
351 int stepping_over_breakpoint = 0;
352
353 /* Should we step over a watchpoint next time keep_going is called?
354 This is needed on targets with non-continuable, non-steppable
355 watchpoints. */
356 int stepping_over_watchpoint = 0;
357
358 /* Set to TRUE if we should finish single-stepping over a breakpoint
359 after hitting the current step-resume breakpoint. The context here
360 is that GDB is doing a `next' or `step' while a signal arrives.
361 When stepping over a breakpoint and a signal arrives, GDB attempts
362 to skip the signal handler, so it inserts a step_resume_breakpoint
363 at the signal return address and resumes the inferior.
364 step_after_step_resume_breakpoint is set to TRUE at that point to
365 remind GDB that there is still a breakpoint to step over when it
366 gets back the SIGTRAP from the step_resume_breakpoint. */
367 int step_after_step_resume_breakpoint = 0;
368
369 /* Pointer to the state machine manager object that handles what is
370 left to do for the thread's execution command after the target
371 stops. Several execution commands use it. */
372 struct thread_fsm *thread_fsm = NULL;
373
374 /* This is used to remember when a fork or vfork event was caught by
375 a catchpoint, and thus the event is to be followed at the next
376 resume of the thread, and not immediately. */
377 struct target_waitstatus pending_follow;
378
379 /* True if this thread has been explicitly requested to stop. */
380 int stop_requested = 0;
381
382 /* The initiating frame of a nexting operation, used for deciding
383 which exceptions to intercept. If it is null_frame_id, no
384 bp_longjmp or bp_exception is used, but longjmp is caught just for
385 bp_longjmp_call_dummy. */
386 struct frame_id initiating_frame = null_frame_id;
387
388 /* Private data used by the target vector implementation. */
389 std::unique_ptr<private_thread_info> priv;
390
391 /* Branch trace information for this thread. */
392 struct btrace_thread_info btrace {};
393
394 /* Flag which indicates that the stack temporaries should be stored while
395 evaluating expressions. */
396 bool stack_temporaries_enabled = false;
397
398 /* Values that are stored as temporaries on stack while evaluating
399 expressions. */
400 std::vector<struct value *> stack_temporaries;
401
402 /* Step-over chain. A thread is in the step-over queue if these are
403 non-NULL. If only a single thread is in the chain, then these
404 fields point to self. */
405 struct thread_info *step_over_prev = NULL;
406 struct thread_info *step_over_next = NULL;
407 };
408
409 /* A gdb::ref_ptr pointer to a thread_info. */
410
411 using thread_info_ref
412 = gdb::ref_ptr<struct thread_info, refcounted_object_ref_policy>;
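/* For example, code that must keep a thread object alive across an
   operation that may delete exited threads can take a strong reference
   (a sketch; TP is a hypothetical thread_info pointer, and this assumes
   gdb::ref_ptr's new_reference helper):

     thread_info_ref ref = thread_info_ref::new_reference (tp);

   The reference is dropped automatically when REF goes out of scope. */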
413
414 /* Create an empty thread list, or empty the existing one. */
415 extern void init_thread_list (void);
416
417 /* Add a thread to the thread list, print a message
418 that a new thread is found, and return the pointer to
419 the new thread. The caller may use this pointer to
420 initialize the private thread data. */
421 extern struct thread_info *add_thread (ptid_t ptid);
422
423 /* Same as add_thread, but does not print a message
424 about the new thread. */
425 extern struct thread_info *add_thread_silent (ptid_t ptid);
426
427 /* Same as add_thread, and sets the private info. */
428 extern struct thread_info *add_thread_with_info (ptid_t ptid,
429 struct private_thread_info *);
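/* For illustration, a target might attach its own per-thread data by
   deriving from private_thread_info (a sketch; the type, member and
   variable names here are hypothetical):

     struct example_thread_info : public private_thread_info
     {
       ~example_thread_info () override = default;

       int kernel_tid = 0;
     };

     thread_info *tp
       = add_thread_with_info (ptid, new example_thread_info);
*/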
430
431 /* Delete an existing thread, removing the entry from its inferior's thread
432 map. */
433 extern void delete_thread (struct thread_info *thread);
434
435 /* Like the above, but don't remove the entry from the inferior thread map. */
436 extern void delete_thread_noremove (struct thread_info *thread);
437
438 /* Delete an existing thread list entry, and be quiet about it. Used
439 after the process this thread belonged to has already exited, for
440 example. */
441 extern void delete_thread_silent (struct thread_info *thread);
442
443 /* Like the above, but don't remove the entry from the inferior thread map. */
444 extern void delete_thread_silent_noremove (thread_info *thread);
445
446 /* Delete a step_resume_breakpoint from the thread database. */
447 extern void delete_step_resume_breakpoint (struct thread_info *);
448
449 /* Delete an exception_resume_breakpoint from the thread database. */
450 extern void delete_exception_resume_breakpoint (struct thread_info *);
451
452 /* Delete the single-step breakpoints of thread TP, if any. */
453 extern void delete_single_step_breakpoints (struct thread_info *tp);
454
455 /* Check if the thread has software single stepping breakpoints
456 set. */
457 extern int thread_has_single_step_breakpoints_set (struct thread_info *tp);
458
459 /* Check whether the thread has software single stepping breakpoints
460 set at PC. */
461 extern int thread_has_single_step_breakpoint_here (struct thread_info *tp,
462 const address_space *aspace,
463 CORE_ADDR addr);
464
465 /* Returns whether to show inferior-qualified thread IDs, or plain
466 thread numbers. Inferior-qualified IDs are shown whenever we have
467 multiple inferiors, or the only inferior left has number > 1. */
468 extern int show_inferior_qualified_tids (void);
469
470 /* Return a string version of THR's thread ID. If there are multiple
471 inferiors, then this prints the inferior-qualifier form, otherwise
472 it only prints the thread number. The result is stored in a
473 circular static buffer, NUMCELLS deep. */
474 const char *print_thread_id (struct thread_info *thr);
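/* For example (a sketch; warning and _() are GDB's usual diagnostic
   helpers):

     warning (_("Thread %s exited"), print_thread_id (thr));
*/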
475
476 /* Boolean test for an already-known ptid. */
477 extern int in_thread_list (ptid_t ptid);
478
479 /* Boolean test for an already-known global thread id (GDB's homegrown
480 global id, not the system's). */
481 extern int valid_global_thread_id (int global_id);
482
483 /* Search function to lookup a thread by 'ptid'. */
484 extern struct thread_info *find_thread_ptid (ptid_t ptid);
485
486 /* Search function to lookup a thread by 'ptid'. Only searches in
487 threads of INF. */
488 extern struct thread_info *find_thread_ptid (inferior *inf, ptid_t ptid);
489
490 /* Find thread by GDB global thread ID. */
491 struct thread_info *find_thread_global_id (int global_id);
492
493 /* Find thread by thread library specific handle in inferior INF. */
494 struct thread_info *find_thread_by_handle
495 (gdb::array_view<const gdb_byte> handle, struct inferior *inf);
496
497 /* Finds the first thread of the specified inferior. */
498 extern struct thread_info *first_thread_of_inferior (inferior *inf);
499
500 /* Returns any thread of inferior INF, giving preference to the
501 current thread. */
502 extern struct thread_info *any_thread_of_inferior (inferior *inf);
503
504 /* Returns any non-exited thread of inferior INF, giving preference to
505 the current thread, and to not executing threads. */
506 extern struct thread_info *any_live_thread_of_inferior (inferior *inf);
507
508 /* Change the ptid of thread OLD_PTID to NEW_PTID. */
509 void thread_change_ptid (ptid_t old_ptid, ptid_t new_ptid);
510
511 /* Iterator function to call a user-provided callback function
512 once for each known thread. */
513 typedef int (*thread_callback_func) (struct thread_info *, void *);
514 extern struct thread_info *iterate_over_threads (thread_callback_func, void *);
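/* For example, a callback that makes iterate_over_threads return the
   first thread of a given process might look like this (a sketch; the
   callback name and its argument are hypothetical):

     static int
     matches_pid (struct thread_info *tp, void *arg)
     {
       return tp->ptid.pid () == *(int *) arg;
     }

     thread_info *tp = iterate_over_threads (matches_pid, &pid);
*/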
515
516 /* Pull in the internals of the inferiors/threads ranges and
517 iterators. Must be done after struct thread_info is defined. */
518 #include "thread-iter.h"
519
520 /* Return a range that can be used to walk over all threads of all
521 inferiors, with range-for. Used like this:
522
523 for (thread_info *thr : all_threads ())
524 { .... }
525 */
526 inline all_threads_range
527 all_threads ()
528 {
529 return {};
530 }
531
532 /* Likewise, but accept a filter PTID. */
533
534 inline all_matching_threads_range
535 all_threads (ptid_t filter_ptid)
536 {
537 return all_matching_threads_range (filter_ptid);
538 }
539
540 /* Return a range that can be used to walk over all non-exited threads
541 of all inferiors, with range-for. FILTER_PTID can be used to
542 filter out threads that don't match. */
543
544 inline all_non_exited_threads_range
545 all_non_exited_threads (ptid_t filter_ptid = minus_one_ptid)
546 {
547 return all_non_exited_threads_range (filter_ptid);
548 }
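/* For example, to act on every non-exited thread of a single process
   (a sketch; PID and resume_one_thread are hypothetical):

     for (thread_info *tp : all_non_exited_threads (ptid_t (pid)))
       resume_one_thread (tp);
*/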
549
550 /* Return a range that can be used to walk over all threads of all
551 inferiors, with range-for, safely. I.e., it is safe to delete the
552 currently-iterated thread. When combined with range-for, this
553 allows convenient patterns like this:
554
555 for (thread_info *t : all_threads_safe ())
556 if (some_condition ())
557 delete_thread (t);
558 */
559
560 inline all_threads_safe_range
561 all_threads_safe ()
562 {
563 return all_threads_safe_range ();
564 }
565
566 extern int thread_count (void);
567
568 /* Return true if we have any thread in any inferior. */
569 extern bool any_thread_p ();
570
571 /* Switch context to thread THR. Also sets the STOP_PC global. */
572 extern void switch_to_thread (struct thread_info *thr);
573
574 /* Switch context to no thread selected. */
575 extern void switch_to_no_thread ();
576
577 /* Switch from one thread to another. Does not read registers. */
578 extern void switch_to_thread_no_regs (struct thread_info *thread);
579
580 /* Marks or clears thread(s) PTID as resumed. If PTID is
581 MINUS_ONE_PTID, applies to all threads. If ptid_is_pid(PTID) is
582 true, applies to all threads of the process pointed at by PTID. */
583 extern void set_resumed (ptid_t ptid, int resumed);
584
585 /* Marks thread PTID as running or stopped.
586 If PTID is minus_one_ptid, marks all threads. */
587 extern void set_running (ptid_t ptid, int running);
588
589 /* Marks or clears thread(s) PTID as having been requested to stop.
590 If PTID is MINUS_ONE_PTID, applies to all threads. If
591 ptid_is_pid(PTID) is true, applies to all threads of the process
592 pointed at by PTID. If STOP, then the THREAD_STOP_REQUESTED
593 observer is called with PTID as argument. */
594 extern void set_stop_requested (ptid_t ptid, int stop);
595
596 /* Marks thread PTID as executing, or not. If PTID is minus_one_ptid,
597 marks all threads.
598
599 Note that this is different from the running state. See the
600 description of state and executing fields of struct
601 thread_info. */
602 extern void set_executing (ptid_t ptid, int executing);
603
604 /* True if any (known or unknown) thread is or may be executing. */
605 extern int threads_are_executing (void);
606
607 /* Merge the executing property of thread PTID over to its thread
608 state property (frontend running/stopped view).
609
610 "not executing" -> "stopped"
611 "executing" -> "running"
612 "exited" -> "exited"
613
614 If PTID is minus_one_ptid, go over all threads.
615
616 Notifications are only emitted if the thread state did change. */
617 extern void finish_thread_state (ptid_t ptid);
618
619 /* Calls finish_thread_state on scope exit, unless release() is called
620 to disengage. */
621 using scoped_finish_thread_state
622 = FORWARD_SCOPE_EXIT (finish_thread_state);
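/* For example (a sketch):

     scoped_finish_thread_state finish_state (minus_one_ptid);

     ... work that may throw ...

     finish_state.release ();  // Success; don't finish on scope exit.
*/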
623
624 /* Commands with a prefix of `thread'. */
625 extern struct cmd_list_element *thread_cmd_list;
626
627 extern void thread_command (const char *tidstr, int from_tty);
628
629 /* Print notices on thread events (attach, detach, etc.), set with
630 `set print thread-events'. */
631 extern bool print_thread_events;
632
633 /* Prints the list of threads and their details on UIOUT. If
634 REQUESTED_THREADS, a list of GDB ids/ranges, is not NULL, only
635 print threads whose ID is included in the list. If PID is not -1,
636 only print threads from the process PID. Otherwise, threads from
637 all attached PIDs are printed. If both REQUESTED_THREADS is not
638 NULL and PID is not -1, then the thread is printed if it belongs to
639 the specified process. Otherwise, an error is raised. */
640 extern void print_thread_info (struct ui_out *uiout,
641 const char *requested_threads,
642 int pid);
643
644 /* Save/restore current inferior/thread/frame. */
645
646 class scoped_restore_current_thread
647 {
648 public:
649 scoped_restore_current_thread ();
650 ~scoped_restore_current_thread ();
651
652 DISABLE_COPY_AND_ASSIGN (scoped_restore_current_thread);
653
654 private:
655 /* Use the "class" keyword here, because of a clash with a "thread_info"
656 function in the Darwin API. */
657 class thread_info *m_thread;
658 inferior *m_inf;
659 frame_id m_selected_frame_id;
660 int m_selected_frame_level;
661 bool m_was_stopped;
662 };
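/* For example, code that needs to temporarily switch to another thread
   can let the destructor restore the previous selection (a sketch;
   OTHER_THREAD is a hypothetical thread_info pointer):

     {
       scoped_restore_current_thread restore_thread;

       switch_to_thread (other_thread);
       ... inspect state of OTHER_THREAD ...
     }  // Previously selected thread, inferior and frame restored here.
*/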
663
664 /* Returns a pointer to the thread_info corresponding to
665 INFERIOR_PTID. INFERIOR_PTID *must* be in the thread list. */
666 extern struct thread_info* inferior_thread (void);
667
668 extern void update_thread_list (void);
669
670 /* Delete any thread the target says is no longer alive. */
671
672 extern void prune_threads (void);
673
674 /* Delete threads marked THREAD_EXITED. Unlike prune_threads, this
675 does not consult the target about whether the thread is alive right
676 now. */
677 extern void delete_exited_threads (void);
678
679 /* Return true if PC is in the stepping range of THREAD. */
680
681 int pc_in_thread_step_range (CORE_ADDR pc, struct thread_info *thread);
682
683 /* Enable storing stack temporaries for thread THR and disable and
684 clear the stack temporaries on destruction. Holds a strong
685 reference to THR. */
686
687 class enable_thread_stack_temporaries
688 {
689 public:
690
691 explicit enable_thread_stack_temporaries (struct thread_info *thr)
692 : m_thr (thr)
693 {
694 gdb_assert (m_thr != NULL);
695
696 m_thr->incref ();
697
698 m_thr->stack_temporaries_enabled = true;
699 m_thr->stack_temporaries.clear ();
700 }
701
702 ~enable_thread_stack_temporaries ()
703 {
704 m_thr->stack_temporaries_enabled = false;
705 m_thr->stack_temporaries.clear ();
706
707 m_thr->decref ();
708 }
709
710 DISABLE_COPY_AND_ASSIGN (enable_thread_stack_temporaries);
711
712 private:
713
714 struct thread_info *m_thr;
715 };
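/* For example, an inferior function call might enable stack temporaries
   for the calling thread for the duration of the call (a sketch; TP is
   a hypothetical thread_info pointer):

     {
       enable_thread_stack_temporaries enable (tp);

       ... evaluate the call; values recorded with
       push_thread_stack_temporary are cleared on scope exit ...
     }
*/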
716
717 extern bool thread_stack_temporaries_enabled_p (struct thread_info *tp);
718
719 extern void push_thread_stack_temporary (struct thread_info *tp, struct value *v);
720
721 extern value *get_last_thread_stack_temporary (struct thread_info *tp);
722
723 extern bool value_in_thread_stack_temporaries (struct value *,
724 struct thread_info *thr);
725
726 /* Add TP to the end of its inferior's pending step-over chain. */
727
728 extern void thread_step_over_chain_enqueue (struct thread_info *tp);
729
730 /* Remove TP from its inferior's pending step-over chain. */
731
732 extern void thread_step_over_chain_remove (struct thread_info *tp);
733
734 /* Return the next thread in the step-over chain starting at TP. NULL
735 if TP is the last entry in the chain. */
736
737 extern struct thread_info *thread_step_over_chain_next (struct thread_info *tp);
738
739 /* Return true if TP is in the step-over chain. */
740
741 extern int thread_is_in_step_over_chain (struct thread_info *tp);
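/* For example, walking a pending step-over chain might look like this
   (a sketch; START is a hypothetical first element of the chain):

     for (thread_info *tp = start;
          tp != NULL;
          tp = thread_step_over_chain_next (tp))
       {
         // Handle TP.
       }
*/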
742
743 /* Cancel any ongoing execution command. */
744
745 extern void thread_cancel_execution_command (struct thread_info *thr);
746
747 /* Check whether it makes sense to access a register of the current
748 thread at this point. If not, throw an error (e.g., the thread is
749 executing). */
750 extern void validate_registers_access (void);
751
752 /* Check whether it makes sense to access a register of THREAD at this point.
753 Returns true if registers may be accessed; false otherwise. */
754 extern bool can_access_registers_thread (struct thread_info *thread);
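/* For example, code that only wants to touch registers when it is safe
   to do so can check first (a sketch; read_registers is hypothetical):

     if (can_access_registers_thread (tp))
       read_registers (tp);
     else
       warning (_("thread is running; registers are not available"));
*/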
755
756 /* Returns whether to show which thread hit the breakpoint, received a
757 signal, etc. and ended up causing a user-visible stop. This is
758 true iff we ever detected multiple threads. */
759 extern int show_thread_that_caused_stop (void);
760
761 /* Print the message for the selected thread and/or frame. */
762 extern void print_selected_thread_frame (struct ui_out *uiout,
763 user_selected_what selection);
764
765 /* Helper for the CLI's "thread" command and for MI's -thread-select.
766 Selects thread THR. TIDSTR is the original string the thread ID
767 was parsed from. This is used in the error message if THR is not
768 alive anymore. */
769 extern void thread_select (const char *tidstr, class thread_info *thr);
770
771 #endif /* GDBTHREAD_H */