/* Multi-process/thread control defs for GDB, the GNU debugger.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.
   Contributed by Lynx Real-Time Systems, Inc.  Los Gatos, CA.


   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBTHREAD_H
#define GDBTHREAD_H

struct symtab;

#include "breakpoint.h"
#include "frame.h"
#include "ui-out.h"
#include "btrace.h"
#include "common/vec.h"
#include "target/waitstatus.h"
#include "cli/cli-utils.h"
#include "common/refcounted-object.h"
#include "common-gdbthread.h"

struct inferior;

/* Frontend view of the thread state.  Possible extensions: stepping,
   finishing, until(ling),...  */
enum thread_state
{
  THREAD_STOPPED,
  THREAD_RUNNING,
  THREAD_EXITED,
};

/* STEP_OVER_ALL means step over all subroutine calls.
   STEP_OVER_UNDEBUGGABLE means step over calls to undebuggable functions.
   STEP_OVER_NONE means don't step over any subroutine calls.  */

enum step_over_calls_kind
{
  STEP_OVER_NONE,
  STEP_OVER_ALL,
  STEP_OVER_UNDEBUGGABLE
};

/* Inferior thread specific part of `struct infcall_control_state'.

   Inferior process counterpart is `struct inferior_control_state'.  */

struct thread_control_state
{
  /* User/external stepping state.  */

  /* Step-resume or longjmp-resume breakpoint.  */
  struct breakpoint *step_resume_breakpoint = nullptr;

  /* Exception-resume breakpoint.  */
  struct breakpoint *exception_resume_breakpoint = nullptr;

  /* Breakpoints used for software single stepping.  Plural, because
     it may have multiple locations.  E.g., if stepping over a
     conditional branch instruction we can't decode the condition for,
     we'll need to put a breakpoint at the branch destination, and
     another at the instruction after the branch.  */
  struct breakpoint *single_step_breakpoints = nullptr;

  /* Range to single step within.

     If this is nonzero, respond to a single-step signal by continuing
     to step if the pc is in this range.

     If step_range_start and step_range_end are both 1, it means to
     step for a single instruction (FIXME: it might clean up
     wait_for_inferior in a minor way if this were changed to the
     address of the instruction and that address plus one.  But maybe
     not).  */
  CORE_ADDR step_range_start = 0;	/* Inclusive */
  CORE_ADDR step_range_end = 0;		/* Exclusive */

  /* Function the thread was in as of the last time it started stepping.  */
  struct symbol *step_start_function = nullptr;

  /* If GDB issues a target step request, and this is nonzero, the
     target should single-step this thread once, and then continue
     single-stepping it without GDB core involvement as long as the
     thread stops in the step range above.  If this is zero, the
     target should ignore the step range, and only issue one single
     step.  */
  int may_range_step = 0;

  /* Stack frame address as of when stepping command was issued.
     This is how we know when we step into a subroutine call, and how
     to set the frame for the breakpoint used to step out.  */
  struct frame_id step_frame_id {};

  /* Similarly, the frame ID of the underlying stack frame (skipping
     any inlined frames).  */
  struct frame_id step_stack_frame_id {};

  /* Nonzero if we are presently stepping over a breakpoint.

     If we hit a breakpoint or watchpoint, and then continue, we need
     to single step the current thread with breakpoints disabled, to
     avoid hitting the same breakpoint or watchpoint again.  And we
     should step just a single thread and keep other threads stopped,
     so that other threads don't miss breakpoints while they are
     removed.

     So, this variable simultaneously means that we need to single
     step the current thread, keep other threads stopped, and that
     breakpoints should be removed while we step.

     This variable is set either:
     - in proceed, when we resume the inferior on the user's explicit
       request
     - in keep_going, if handle_inferior_event decides we need to
       step over a breakpoint.

     The variable is cleared in normal_stop.  proceed calls
     wait_for_inferior, which calls handle_inferior_event in a loop,
     and until wait_for_inferior exits, this variable is changed only
     by keep_going.  */
  int trap_expected = 0;

  /* Nonzero if the thread is being proceeded for a "finish" command
     or a similar situation when the return value should be printed.  */
  int proceed_to_finish = 0;

  /* Nonzero if the thread is being proceeded for an inferior function
     call.  */
  int in_infcall = 0;

  enum step_over_calls_kind step_over_calls = STEP_OVER_NONE;

  /* Nonzero if stopped due to a step command.  */
  int stop_step = 0;

  /* Chain containing status of breakpoint(s) the thread stopped
     at.  */
  bpstat stop_bpstat = nullptr;

  /* Whether the command that started the thread was a stepping
     command.  This is used to decide whether "set scheduler-locking
     step" behaves like "on" or "off".  */
  int stepping_command = 0;
};

/* Inferior thread specific part of `struct infcall_suspend_state'.  */

struct thread_suspend_state
{
  /* Last signal that the inferior received (why it stopped).  When
     the thread is resumed, this signal is delivered.  Note: the
     target should not check whether the signal is in pass state,
     because the signal may have been explicitly passed with the
     "signal" command, which overrides "handle nopass".  If the signal
     should be suppressed, the core will take care of clearing this
     before the target is resumed.  */
  enum gdb_signal stop_signal = GDB_SIGNAL_0;

  /* The reason the thread last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;

  /* The waitstatus for this thread's last event.  */
  struct target_waitstatus waitstatus {};
  /* If true, WAITSTATUS hasn't been handled yet.  */
  int waitstatus_pending_p = 0;

  /* Record the pc of the thread the last time it stopped.  (This is
     not the current thread's PC as that may have changed since the
     last stop, e.g., "return" command, or "p $pc = 0xf000").

     - If the thread's PC has not changed since the thread last
       stopped, then proceed skips a breakpoint at the current PC,
       otherwise we let the thread run into the breakpoint.

     - If the thread has an unprocessed event pending, as indicated by
       waitstatus_pending_p, this is used in coordination with
       stop_reason: if the thread's PC has changed since the thread
       last stopped, a pending breakpoint waitstatus is discarded.

     - If the thread is running, this is set to -1, to avoid leaving
       it with a stale value, to make it easier to catch bugs.  */
  CORE_ADDR stop_pc = 0;
};

/* Base class for target-specific thread data.  */
struct private_thread_info
{
  virtual ~private_thread_info () = 0;
};
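
/* For illustration only (a sketch; "example_thread_info" and
   "kernel_tid" are made-up names, not part of GDB): a target backend
   would typically derive from private_thread_info to attach its own
   per-thread data to a thread_info, along these lines:

     struct example_thread_info : public private_thread_info
     {
       ~example_thread_info () override = default;

       unsigned long kernel_tid = 0;
     };

   An instance can then be installed via add_thread_with_info
   (declared below) or by resetting thread_info::priv.  */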

/* Threads are intrusively refcounted objects.  Being the
   user-selected thread is normally considered an implicit strong
   reference and is thus not accounted in the refcount, unlike
   inferior objects.  This is necessary, because there's no "current
   thread" pointer.  Instead the current thread is inferred from the
   inferior_ptid global.  However, when GDB needs to remember the
   selected thread to later restore it, GDB bumps the thread object's
   refcount, to prevent something deleting the thread object before
   reverting back (e.g., due to a "kill" command).  If the thread
   meanwhile exits before being re-selected, then the thread object is
   left listed in the thread list, but marked with state
   THREAD_EXITED.  (See make_cleanup_restore_current_thread and
   delete_thread).  All other thread references are considered weak
   references.  Placing a thread in the thread list is an implicit
   strong reference, and is thus not accounted for in the thread's
   refcount.  */

class thread_info : public refcounted_object
{
public:
  explicit thread_info (inferior *inf, ptid_t ptid);
  ~thread_info ();

  bool deletable () const;

  /* Mark this thread as running and notify observers.  */
  void set_running (bool running);

  struct thread_info *next = NULL;
  ptid_t ptid;		/* "Actual process id";
			   In fact, this may be overloaded with
			   kernel thread id, etc.  */

  /* Each thread has two GDB IDs.

     a) The thread ID (Id).  This consists of the pair of:

        - the number of the thread's inferior and,

        - the thread's thread number in its inferior, aka, the
          per-inferior thread number.  This number is unique in the
          inferior but not unique between inferiors.

     b) The global ID (GId).  This is a single integer unique across
        all inferiors.

     E.g.:

     (gdb) info threads -gid
       Id   GId  Target Id   Frame
     * 1.1  1    Thread A    0x16a09237 in foo () at foo.c:10
       1.2  3    Thread B    0x15ebc6ed in bar () at foo.c:20
       1.3  5    Thread C    0x15ebc6ed in bar () at foo.c:20
       2.1  2    Thread A    0x16a09237 in foo () at foo.c:10
       2.2  4    Thread B    0x15ebc6ed in bar () at foo.c:20
       2.3  6    Thread C    0x15ebc6ed in bar () at foo.c:20

     Above, both inferiors 1 and 2 have threads numbered 1-3, but each
     thread has its own unique global ID.  */

  /* The thread's global GDB thread number.  This is exposed to MI,
     Python/Scheme, visible with "info threads -gid", and is also what
     the $_gthread convenience variable is bound to.  */
  int global_num;

  /* The per-inferior thread number.  This is unique in the inferior
     the thread belongs to, but not unique between inferiors.  This is
     what the $_thread convenience variable is bound to.  */
  int per_inf_num;

  /* The inferior this thread belongs to.  */
  struct inferior *inf;

  /* The name of the thread, as specified by the user.  This is NULL
     if the thread does not have a user-given name.  */
  char *name = NULL;

  /* Non-zero means the thread is executing.  Note: this is different
     from saying that there is an active target and we are stopped at
     a breakpoint, for instance.  This is a real indicator of whether
     the thread is off and running.  */
  int executing = 0;

  /* Non-zero if this thread is resumed from infrun's perspective.
     Note that a thread can be marked both as not-executing and
     resumed at the same time.  This happens if we try to resume a
     thread that has a wait status pending.  We shouldn't let the
     thread really run until that wait status has been processed, but
     we should not process that wait status if we didn't try to let
     the thread run.  */
  int resumed = 0;

  /* Frontend view of the thread state.  Note that the THREAD_RUNNING/
     THREAD_STOPPED states are different from EXECUTING.  When the
     thread is stopped internally while handling an internal event,
     like a software single-step breakpoint, EXECUTING will be false,
     but STATE will still be THREAD_RUNNING.  */
  enum thread_state state = THREAD_STOPPED;

  /* State of GDB control of inferior thread execution.
     See `struct thread_control_state'.  */
  thread_control_state control;

  /* State of inferior thread to restore after GDB is done with an inferior
     call.  See `struct thread_suspend_state'.  */
  thread_suspend_state suspend;

  int current_line = 0;
  struct symtab *current_symtab = NULL;

  /* Internal stepping state.  */

  /* Record the pc of the thread the last time it was resumed.  (It
     can't be done on stop as the PC may change since the last stop,
     e.g., "return" command, or "p $pc = 0xf000").  This is maintained
     by proceed and keep_going, and among other things, it's used in
     adjust_pc_after_break to distinguish a hardware single-step
     SIGTRAP from a breakpoint SIGTRAP.  */
  CORE_ADDR prev_pc = 0;

  /* Did we set the thread stepping a breakpoint instruction?  This is
     used in conjunction with PREV_PC to decide whether to adjust the
     PC.  */
  int stepped_breakpoint = 0;

  /* Should we step over breakpoint next time keep_going is called?  */
  int stepping_over_breakpoint = 0;

  /* Should we step over a watchpoint next time keep_going is called?
     This is needed on targets with non-continuable, non-steppable
     watchpoints.  */
  int stepping_over_watchpoint = 0;

  /* Set to TRUE if we should finish single-stepping over a breakpoint
     after hitting the current step-resume breakpoint.  The context is
     that GDB is doing a `next' or `step' when a signal arrives.  When
     stepping over a breakpoint and a signal arrives, GDB attempts to
     skip the signal handler, so it inserts a step_resume_breakpoint
     at the signal return address, and resumes the inferior.
     step_after_step_resume_breakpoint is set to TRUE at that point to
     remind GDB that there is still a breakpoint to step over when it
     gets back a SIGTRAP from the step_resume_breakpoint.  */
  int step_after_step_resume_breakpoint = 0;

  /* Pointer to the state machine manager object that handles what is
     left to do for the thread's execution command after the target
     stops.  Several execution commands use it.  */
  struct thread_fsm *thread_fsm = NULL;

  /* This is used to remember when a fork or vfork event was caught by
     a catchpoint, and thus the event is to be followed at the next
     resume of the thread, and not immediately.  */
  struct target_waitstatus pending_follow;

  /* True if this thread has been explicitly requested to stop.  */
  int stop_requested = 0;

  /* The initiating frame of a nexting operation, used for deciding
     which exceptions to intercept.  If it is null_frame_id, there is
     no bp_longjmp or bp_exception, but longjmp has been caught just
     for bp_longjmp_call_dummy.  */
  struct frame_id initiating_frame = null_frame_id;

  /* Private data used by the target vector implementation.  */
  std::unique_ptr<private_thread_info> priv;

  /* Branch trace information for this thread.  */
  struct btrace_thread_info btrace {};

  /* Flag which indicates that the stack temporaries should be stored while
     evaluating expressions.  */
  bool stack_temporaries_enabled = false;

  /* Values that are stored as temporaries on stack while evaluating
     expressions.  */
  std::vector<struct value *> stack_temporaries;

  /* Step-over chain.  A thread is in the step-over queue if these are
     non-NULL.  If only a single thread is in the chain, then these
     fields point to self.  */
  struct thread_info *step_over_prev = NULL;
  struct thread_info *step_over_next = NULL;
};

/* A gdb::ref_ptr pointer to a thread_info.  */

using thread_info_ref
  = gdb::ref_ptr<struct thread_info, refcounted_object_ref_policy>;

/* Create an empty thread list, or empty the existing one.  */
extern void init_thread_list (void);

/* Add a thread to the thread list, print a message
   that a new thread is found, and return the pointer to
   the new thread.  The caller may use this pointer to
   initialize the private thread data.  */
extern struct thread_info *add_thread (ptid_t ptid);

/* Same as add_thread, but does not print a message about the new
   thread.  */
extern struct thread_info *add_thread_silent (ptid_t ptid);

/* Same as add_thread, and sets the private info.  */
extern struct thread_info *add_thread_with_info (ptid_t ptid,
						 struct private_thread_info *);

/* Delete an existing thread list entry.  */
extern void delete_thread (struct thread_info *thread);

/* Delete an existing thread list entry, and be quiet about it.  Used
   when the process this thread belonged to has already exited, for
   example.  */
extern void delete_thread_silent (struct thread_info *thread);

/* Delete a step_resume_breakpoint from the thread database.  */
extern void delete_step_resume_breakpoint (struct thread_info *);

/* Delete an exception_resume_breakpoint from the thread database.  */
extern void delete_exception_resume_breakpoint (struct thread_info *);

/* Delete the single-step breakpoints of thread TP, if any.  */
extern void delete_single_step_breakpoints (struct thread_info *tp);

/* Check if the thread has software single stepping breakpoints
   set.  */
extern int thread_has_single_step_breakpoints_set (struct thread_info *tp);

/* Check whether the thread has software single stepping breakpoints
   set at PC.  */
extern int thread_has_single_step_breakpoint_here (struct thread_info *tp,
						   const address_space *aspace,
						   CORE_ADDR addr);

/* Returns whether to show inferior-qualified thread IDs, or plain
   thread numbers.  Inferior-qualified IDs are shown whenever we have
   multiple inferiors, or the only inferior left has number > 1.  */
extern int show_inferior_qualified_tids (void);

/* Return a string version of THR's thread ID.  If there are multiple
   inferiors, then this prints the inferior-qualifier form, otherwise
   it only prints the thread number.  The result is stored in a
   circular static buffer, NUMCELLS deep.  */
const char *print_thread_id (struct thread_info *thr);
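
/* For example (a sketch; printf_filtered and the _() macro come from
   GDB's utility headers and are assumed to be in scope in the
   caller):

     printf_filtered (_("Switching to thread %s.\n"),
                      print_thread_id (thr));
*/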

/* Boolean test for an already-known ptid.  */
extern int in_thread_list (ptid_t ptid);

/* Boolean test for an already-known global thread id (GDB's homegrown
   global id, not the system's).  */
extern int valid_global_thread_id (int global_id);

/* Search function to lookup a thread by 'ptid'.  */
extern struct thread_info *find_thread_ptid (ptid_t ptid);

/* Search function to lookup a thread by 'ptid'.  Only searches in
   threads of INF.  */
extern struct thread_info *find_thread_ptid (inferior *inf, ptid_t ptid);

/* Find thread by GDB global thread ID.  */
struct thread_info *find_thread_global_id (int global_id);

/* Find thread by thread library specific handle in inferior INF.  */
struct thread_info *find_thread_by_handle (struct value *thread_handle,
					   struct inferior *inf);

/* Finds the first thread of the specified inferior.  */
extern struct thread_info *first_thread_of_inferior (inferior *inf);

/* Returns any thread of inferior INF, giving preference to the
   current thread.  */
extern struct thread_info *any_thread_of_inferior (inferior *inf);

/* Returns any non-exited thread of inferior INF, giving preference to
   the current thread, and to not executing threads.  */
extern struct thread_info *any_live_thread_of_inferior (inferior *inf);

/* Change the ptid of thread OLD_PTID to NEW_PTID.  */
void thread_change_ptid (ptid_t old_ptid, ptid_t new_ptid);

/* Iterator function to call a user-provided callback function
   once for each known thread.  */
typedef int (*thread_callback_func) (struct thread_info *, void *);
extern struct thread_info *iterate_over_threads (thread_callback_func, void *);
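
/* For example, counting stopped threads could look like this (a
   sketch for illustration; "count_stopped" is a made-up callback, not
   part of GDB):

     static int
     count_stopped (struct thread_info *tp, void *data)
     {
       if (tp->state == THREAD_STOPPED)
         ++*(int *) data;
       return 0;
     }

     int num = 0;
     iterate_over_threads (count_stopped, &num);

   A non-zero return from the callback stops the iteration early, and
   iterate_over_threads returns the thread the callback was called
   on.  */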

/* Pull in the internals of the inferiors/threads ranges and
   iterators.  Must be done after struct thread_info is defined.  */
#include "thread-iter.h"

/* Return a range that can be used to walk over all threads of all
   inferiors, with range-for.  Used like this:

     for (thread_info *thr : all_threads ())
       { .... }
*/
inline all_threads_range
all_threads ()
{
  return {};
}

/* Likewise, but accept a filter PTID.  */

inline all_matching_threads_range
all_threads (ptid_t filter_ptid)
{
  return all_matching_threads_range (filter_ptid);
}

/* Return a range that can be used to walk over all non-exited threads
   of all inferiors, with range-for.  FILTER_PTID can be used to
   filter out threads that don't match.  */

inline all_non_exited_threads_range
all_non_exited_threads (ptid_t filter_ptid = minus_one_ptid)
{
  return all_non_exited_threads_range (filter_ptid);
}
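
/* For example (a sketch; PID is a placeholder for a process id the
   caller already has), walking the non-exited threads of just that
   process could look like:

     for (thread_info *tp : all_non_exited_threads (ptid_t (pid)))
       if (tp->executing)
         ...;
*/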

/* Return a range that can be used to walk over all threads of all
   inferiors, with range-for, safely.  I.e., it is safe to delete the
   currently-iterated thread.  When combined with range-for, this
   allows convenient patterns like this:

     for (thread_info *t : all_threads_safe ())
       if (some_condition ())
         delete t;
*/

inline all_threads_safe_range
all_threads_safe ()
{
  return all_threads_safe_range ();
}

extern int thread_count (void);

/* Return true if we have any thread in any inferior.  */
extern bool any_thread_p ();

/* Switch context to thread THR.  Also sets the STOP_PC global.  */
extern void switch_to_thread (struct thread_info *thr);

/* Switch context to no thread selected.  */
extern void switch_to_no_thread ();

/* Switch from one thread to another.  Does not read registers.  */
extern void switch_to_thread_no_regs (struct thread_info *thread);

/* Marks or clears thread(s) PTID as resumed.  If PTID is
   MINUS_ONE_PTID, applies to all threads.  If ptid_is_pid(PTID) is
   true, applies to all threads of the process pointed at by PTID.  */
extern void set_resumed (ptid_t ptid, int resumed);

/* Marks thread PTID as running or stopped.
   If PTID is minus_one_ptid, marks all threads.  */
extern void set_running (ptid_t ptid, int running);

/* Marks or clears thread(s) PTID as having been requested to stop.
   If PTID is MINUS_ONE_PTID, applies to all threads.  If
   ptid_is_pid(PTID) is true, applies to all threads of the process
   pointed at by PTID.  If STOP, then the THREAD_STOP_REQUESTED
   observer is called with PTID as argument.  */
extern void set_stop_requested (ptid_t ptid, int stop);

/* NOTE: Since the thread state is not a boolean, most times, you do
   not want to check it with negation.  If you really want to check if
   the thread is stopped,

   use (good):

     if (is_stopped (ptid))

   instead of (bad):

     if (!is_running (ptid))

   The latter also returns true on exited threads, most likely not
   what you want.  */

/* Reports whether, from the frontend's perspective, thread PTID is
   running.  */
extern int is_running (ptid_t ptid);

/* Is this thread listed, but known to have exited?  We keep it listed
   (but not visible) until it's safe to delete.  */
extern int is_exited (ptid_t ptid);

/* From the frontend's perspective, is this thread stopped?  */
extern int is_stopped (ptid_t ptid);

/* Marks thread PTID as executing, or not.  If PTID is minus_one_ptid,
   marks all threads.

   Note that this is different from the running state.  See the
   description of state and executing fields of struct
   thread_info.  */
extern void set_executing (ptid_t ptid, int executing);

/* True if any (known or unknown) thread is or may be executing.  */
extern int threads_are_executing (void);

/* Merge the executing property of thread PTID over to its thread
   state property (frontend running/stopped view).

   "not executing" -> "stopped"
   "executing"     -> "running"
   "exited"        -> "exited"

   If PTID is minus_one_ptid, go over all threads.

   Notifications are only emitted if the thread state did change.  */
extern void finish_thread_state (ptid_t ptid);

/* Calls finish_thread_state on scope exit, unless release() is called
   to disengage.  */
class scoped_finish_thread_state
{
public:
  explicit scoped_finish_thread_state (ptid_t ptid)
    : m_ptid (ptid)
  {}

  ~scoped_finish_thread_state ()
  {
    if (!m_released)
      finish_thread_state (m_ptid);
  }

  /* Disengage.  */
  void release ()
  {
    m_released = true;
  }

  DISABLE_COPY_AND_ASSIGN (scoped_finish_thread_state);

private:
  bool m_released = false;
  ptid_t m_ptid;
};
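
/* Typical usage (a sketch):

     scoped_finish_thread_state finish_state (ptid);

     ... work that may throw ...

     finish_state.release ();

   If the scope is left without release () having been called, e.g.,
   because an exception propagated, the destructor calls
   finish_thread_state for PTID; calling release () disengages
   that.  */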

/* Commands with a prefix of `thread'.  */
extern struct cmd_list_element *thread_cmd_list;

extern void thread_command (const char *tidstr, int from_tty);

/* Print notices on thread events (attach, detach, etc.), set with
   `set print thread-events'.  */
extern int print_thread_events;

/* Prints the list of threads and their details on UIOUT.  If
   REQUESTED_THREADS, a list of GDB ids/ranges, is not NULL, only
   print threads whose ID is included in the list.  If PID is not -1,
   only print threads from the process PID.  Otherwise, threads from
   all attached PIDs are printed.  If both REQUESTED_THREADS is not
   NULL and PID is not -1, then the thread is printed if it belongs to
   the specified process.  Otherwise, an error is raised.  */
extern void print_thread_info (struct ui_out *uiout, char *requested_threads,
			       int pid);

/* Save/restore current inferior/thread/frame.  */

class scoped_restore_current_thread
{
public:
  scoped_restore_current_thread ();
  ~scoped_restore_current_thread ();

  DISABLE_COPY_AND_ASSIGN (scoped_restore_current_thread);

private:
  /* Use the "class" keyword here, because of a clash with a "thread_info"
     function in the Darwin API.  */
  class thread_info *m_thread;
  inferior *m_inf;
  frame_id m_selected_frame_id;
  int m_selected_frame_level;
  bool m_was_stopped;
};
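
/* For example (a sketch; OTHER_THREAD is a placeholder for a
   thread_info the caller already has), to temporarily operate on
   another thread:

     {
       scoped_restore_current_thread restore_thread;

       switch_to_thread (other_thread);
       ... work with the now-selected thread ...
     }

   On scope exit, the previously selected inferior, thread and frame
   are re-selected.  */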

/* Returns a pointer into the thread_info corresponding to
   INFERIOR_PTID.  INFERIOR_PTID *must* be in the thread list.  */
extern struct thread_info* inferior_thread (void);

extern void update_thread_list (void);

/* Delete any thread the target says is no longer alive.  */

extern void prune_threads (void);

/* Delete threads marked THREAD_EXITED.  Unlike prune_threads, this
   does not consult the target about whether the thread is alive right
   now.  */
extern void delete_exited_threads (void);

/* Return true if PC is in the stepping range of THREAD.  */

int pc_in_thread_step_range (CORE_ADDR pc, struct thread_info *thread);

/* Enable storing stack temporaries for thread THR and disable and
   clear the stack temporaries on destruction.  Holds a strong
   reference to THR.  */

class enable_thread_stack_temporaries
{
public:

  explicit enable_thread_stack_temporaries (struct thread_info *thr)
    : m_thr (thr)
  {
    gdb_assert (m_thr != NULL);

    m_thr->incref ();

    m_thr->stack_temporaries_enabled = true;
    m_thr->stack_temporaries.clear ();
  }

  ~enable_thread_stack_temporaries ()
  {
    m_thr->stack_temporaries_enabled = false;
    m_thr->stack_temporaries.clear ();

    m_thr->decref ();
  }

  DISABLE_COPY_AND_ASSIGN (enable_thread_stack_temporaries);

private:

  struct thread_info *m_thr;
};
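
/* Typical usage (a sketch; THR is a placeholder for a thread_info the
   caller already holds):

     {
       enable_thread_stack_temporaries enable (thr);

       ... evaluate expressions; values pushed with
       push_thread_stack_temporary (declared below) stay recorded
       here ...
     }

   On scope exit the temporaries are cleared and the extra reference
   on THR is dropped.  */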

extern bool thread_stack_temporaries_enabled_p (struct thread_info *tp);

extern void push_thread_stack_temporary (struct thread_info *tp, struct value *v);

extern value *get_last_thread_stack_temporary (struct thread_info *tp);

extern bool value_in_thread_stack_temporaries (struct value *,
					       struct thread_info *thr);

/* Add TP to the end of its inferior's pending step-over chain.  */

extern void thread_step_over_chain_enqueue (struct thread_info *tp);

/* Remove TP from its inferior's pending step-over chain.  */

extern void thread_step_over_chain_remove (struct thread_info *tp);

/* Return the next thread in the step-over chain starting at TP.  NULL
   if TP is the last entry in the chain.  */

extern struct thread_info *thread_step_over_chain_next (struct thread_info *tp);

/* Return true if TP is in the step-over chain.  */

extern int thread_is_in_step_over_chain (struct thread_info *tp);
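
/* For example (a sketch), walking the step-over chain starting at
   some thread TP:

     for (thread_info *t = tp;
          t != NULL;
          t = thread_step_over_chain_next (t))
       ...;
*/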

/* Cancel any ongoing execution command.  */

extern void thread_cancel_execution_command (struct thread_info *thr);

/* Check whether it makes sense to access a register of the current
   thread at this point.  If not, throw an error (e.g., the thread is
   executing).  */
extern void validate_registers_access (void);

/* Check whether it makes sense to access a register of THREAD at this point.
   Returns true if registers may be accessed; false otherwise.  */
extern bool can_access_registers_thread (struct thread_info *thread);

/* Returns whether to show which thread hit the breakpoint, received a
   signal, etc. and ended up causing a user-visible stop.  This is
   true iff we ever detected multiple threads.  */
extern int show_thread_that_caused_stop (void);

/* Print the message for the selected thread and/or frame.  */
extern void print_selected_thread_frame (struct ui_out *uiout,
					 user_selected_what selection);

/* Helper for the CLI's "thread" command and for MI's -thread-select.
   Selects thread THR.  TIDSTR is the original string the thread ID
   was parsed from.  This is used in the error message if THR is not
   alive anymore.  */
extern void thread_select (const char *tidstr, class thread_info *thr);

#endif /* GDBTHREAD_H */