gdb: add target_ops::supports_displaced_step
gdbserver/linux-low.h
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func) (struct regcache *, void *);
38 typedef void (*regset_store_func) (struct regcache *, const void *);
39 enum regset_type {
40 GENERAL_REGS,
41 FP_REGS,
42 EXTENDED_REGS,
43 OPTIONAL_REGS, /* Do not error if the regset cannot be accessed. */
44 };
45
46 /* The arch's regsets array initializer must be terminated with a NULL
47 regset. */
48 #define NULL_REGSET \
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
50
51 struct regset_info
52 {
53 int get_request, set_request;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
56 int nt_type;
57 int size;
58 enum regset_type type;
59 regset_fill_func fill_function;
60 regset_store_func store_function;
61 };
62
63 /* Aggregation of all the supported regsets of a given
64 architecture/mode. */
65
66 struct regsets_info
67 {
68 /* The regsets array. */
69 struct regset_info *regsets;
70
71 /* The number of regsets in the REGSETS array. */
72 int num_regsets;
73
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets;
80 };
81
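/* Illustrative sketch, not part of this header: an arch backend's
   regsets array is expected to end with NULL_REGSET, and is usually
   wrapped in a regsets_info object. The callback names below are
   hypothetical placeholders; real backends (e.g. linux-x86-low.cc)
   provide their own fill/store functions and ptrace requests.

     static void example_fill_gregset (struct regcache *regcache, void *buf);
     static void example_store_gregset (struct regcache *regcache,
                                        const void *buf);

     static struct regset_info example_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, example_fill_gregset, example_store_gregset },
       NULL_REGSET
     };

     static struct regsets_info example_regsets_info = {
       example_regsets,   // regsets
       0,                 // num_regsets, normally computed by initialize_regsets_info
       NULL               // disabled_regsets
     };  */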
82 #endif
83
84 /* Mapping between the general-purpose registers in `struct user'
85 format and GDB's register array layout. */
86
87 struct usrregs_info
88 {
89 /* The number of registers accessible. */
90 int num_regs;
91
92 /* The registers map. */
93 int *regmap;
94 };
95
96 /* All info needed to access an architecture/mode's registers. */
97
98 struct regs_info
99 {
100 /* Regset support bitmap: 1 for registers that are transferred as a part
101 of a regset, 0 for ones that need to be handled individually. This
102 can be NULL if all registers are transferred with regsets or regsets
103 are not supported. */
104 unsigned char *regset_bitmap;
105
106 /* Info used when accessing registers with PTRACE_PEEKUSER /
107 PTRACE_POKEUSER. This can be NULL if all registers are
108 	 transferred with regsets. */
109 struct usrregs_info *usrregs;
110
111 #ifdef HAVE_LINUX_REGSETS
112 /* Info used when accessing registers with regsets. */
113 struct regsets_info *regsets_info;
114 #endif
115 };
116
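/* Illustrative sketch, not part of this header: a backend typically
   builds one regs_info per architecture/mode and hands it out from its
   get_regs_info override (declared further down in
   linux_process_target). All names below are hypothetical.

     static int example_regmap[] = { 0, 4, 8, 12 };  // struct user offsets

     static struct usrregs_info example_usrregs_info = {
       4,                 // num_regs
       example_regmap     // regmap
     };

     static struct regs_info example_regs_info = {
       NULL,                    // regset_bitmap: no per-register split
       &example_usrregs_info,   // PTRACE_PEEKUSER / PTRACE_POKEUSER info
       &example_regsets_info    // only when HAVE_LINUX_REGSETS
     };  */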
117 struct process_info_private
118 {
119 /* Arch-specific additions. */
120 struct arch_process_info *arch_private;
121
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db *thread_db;
125
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
127 CORE_ADDR r_debug;
128 };
129
130 struct lwp_info;
131
132 /* Target ops definitions for a Linux target. */
133
134 class linux_process_target : public process_stratum_target
135 {
136 public:
137
138 int create_inferior (const char *program,
139 const std::vector<char *> &program_args) override;
140
141 void post_create_inferior () override;
142
143 int attach (unsigned long pid) override;
144
145 int kill (process_info *proc) override;
146
147 int detach (process_info *proc) override;
148
149 void mourn (process_info *proc) override;
150
151 void join (int pid) override;
152
153 bool thread_alive (ptid_t pid) override;
154
155 void resume (thread_resume *resume_info, size_t n) override;
156
157 ptid_t wait (ptid_t ptid, target_waitstatus *status,
158 int options) override;
159
160 void fetch_registers (regcache *regcache, int regno) override;
161
162 void store_registers (regcache *regcache, int regno) override;
163
164 int prepare_to_access_memory () override;
165
166 void done_accessing_memory () override;
167
168 int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
169 int len) override;
170
171 int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
172 int len) override;
173
174 void look_up_symbols () override;
175
176 void request_interrupt () override;
177
178 bool supports_read_auxv () override;
179
180 int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
181 unsigned int len) override;
182
183 int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
184 int size, raw_breakpoint *bp) override;
185
186 int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
187 int size, raw_breakpoint *bp) override;
188
189 bool stopped_by_sw_breakpoint () override;
190
191 bool supports_stopped_by_sw_breakpoint () override;
192
193 bool stopped_by_hw_breakpoint () override;
194
195 bool supports_stopped_by_hw_breakpoint () override;
196
197 bool supports_hardware_single_step () override;
198
199 bool stopped_by_watchpoint () override;
200
201 CORE_ADDR stopped_data_address () override;
202
203 bool supports_read_offsets () override;
204
205 int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;
206
207 bool supports_get_tls_address () override;
208
209 int get_tls_address (thread_info *thread, CORE_ADDR offset,
210 CORE_ADDR load_module, CORE_ADDR *address) override;
211
212 bool supports_qxfer_osdata () override;
213
214 int qxfer_osdata (const char *annex, unsigned char *readbuf,
215 unsigned const char *writebuf,
216 CORE_ADDR offset, int len) override;
217
218 bool supports_qxfer_siginfo () override;
219
220 int qxfer_siginfo (const char *annex, unsigned char *readbuf,
221 unsigned const char *writebuf,
222 CORE_ADDR offset, int len) override;
223
224 bool supports_non_stop () override;
225
226 bool async (bool enable) override;
227
228 int start_non_stop (bool enable) override;
229
230 bool supports_multi_process () override;
231
232 bool supports_fork_events () override;
233
234 bool supports_vfork_events () override;
235
236 bool supports_exec_events () override;
237
238 void handle_new_gdb_connection () override;
239
240 int handle_monitor_command (char *mon) override;
241
242 int core_of_thread (ptid_t ptid) override;
243
244 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
245 bool supports_read_loadmap () override;
246
247 int read_loadmap (const char *annex, CORE_ADDR offset,
248 unsigned char *myaddr, unsigned int len) override;
249 #endif
250
251 CORE_ADDR read_pc (regcache *regcache) override;
252
253 void write_pc (regcache *regcache, CORE_ADDR pc) override;
254
255 bool supports_thread_stopped () override;
256
257 bool thread_stopped (thread_info *thread) override;
258
259 void pause_all (bool freeze) override;
260
261 void unpause_all (bool unfreeze) override;
262
263 void stabilize_threads () override;
264
265 bool supports_disable_randomization () override;
266
267 bool supports_qxfer_libraries_svr4 () override;
268
269 int qxfer_libraries_svr4 (const char *annex,
270 unsigned char *readbuf,
271 unsigned const char *writebuf,
272 CORE_ADDR offset, int len) override;
273
274 bool supports_agent () override;
275
276 #ifdef HAVE_LINUX_BTRACE
277 btrace_target_info *enable_btrace (ptid_t ptid,
278 const btrace_config *conf) override;
279
280 int disable_btrace (btrace_target_info *tinfo) override;
281
282 int read_btrace (btrace_target_info *tinfo, buffer *buf,
283 enum btrace_read_type type) override;
284
285 int read_btrace_conf (const btrace_target_info *tinfo,
286 buffer *buf) override;
287 #endif
288
289 bool supports_range_stepping () override;
290
291 bool supports_pid_to_exec_file () override;
292
293 char *pid_to_exec_file (int pid) override;
294
295 bool supports_multifs () override;
296
297 int multifs_open (int pid, const char *filename, int flags,
298 mode_t mode) override;
299
300 int multifs_unlink (int pid, const char *filename) override;
301
302 ssize_t multifs_readlink (int pid, const char *filename, char *buf,
303 size_t bufsiz) override;
304
305 const char *thread_name (ptid_t thread) override;
306
307 #if USE_THREAD_DB
308 bool thread_handle (ptid_t ptid, gdb_byte **handle,
309 int *handle_len) override;
310 #endif
311
312 bool supports_catch_syscall () override;
313
314 /* Return the information to access registers. This has public
315 visibility because proc-service uses it. */
316 virtual const regs_info *get_regs_info () = 0;
317
318 private:
319
320 /* Handle a GNU/Linux extended wait response. If we see a clone,
321 fork, or vfork event, we need to add the new LWP to our list
322 (and return 0 so as not to report the trap to higher layers).
323 If we see an exec event, we will modify ORIG_EVENT_LWP to point
324 to a new LWP representing the new program. */
325 int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);
326
327 /* Do low-level handling of the event, and check if we should go on
328 and pass it to caller code. Return the affected lwp if we are, or
329 NULL otherwise. */
330 lwp_info *filter_event (int lwpid, int wstat);
331
332 /* Wait for an event from child(ren) WAIT_PTID, and return any that
333 match FILTER_PTID (leaving others pending). The PTIDs can be:
334 minus_one_ptid, to specify any child; a pid PTID, specifying all
335 lwps of a thread group; or a PTID representing a single lwp. Store
336 the stop status through the status pointer WSTAT. OPTIONS is
337 passed to the waitpid call. Return 0 if no event was found and
338 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
339 	 were found. Return the PID of the stopped child otherwise. */
340 int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
341 int *wstatp, int options);
342
343 /* Wait for an event from child(ren) PTID. PTIDs can be:
344 minus_one_ptid, to specify any child; a pid PTID, specifying all
345 lwps of a thread group; or a PTID representing a single lwp. Store
346 the stop status through the status pointer WSTAT. OPTIONS is
347 passed to the waitpid call. Return 0 if no event was found and
348 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
349 	 were found. Return the PID of the stopped child otherwise. */
350 int wait_for_event (ptid_t ptid, int *wstatp, int options);
351
352 /* Wait for all children to stop for the SIGSTOPs we just queued. */
353 void wait_for_sigstop ();
354
355 /* Wait for process, returns status. */
356 ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
357 int target_options);
358
359 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
360 If SUSPEND, then also increase the suspend count of every LWP,
361 except EXCEPT. */
362 void stop_all_lwps (int suspend, lwp_info *except);
363
364 /* Stopped LWPs that the client wanted to be running, that don't have
365 pending statuses, are set to run again, except for EXCEPT, if not
366 NULL. This undoes a stop_all_lwps call. */
367 void unstop_all_lwps (int unsuspend, lwp_info *except);
368
369 	 /* Start a step-over operation on LWP. When LWP is stopped at a
370 	 breakpoint, to make progress, we need to move the breakpoint out
371 	 of the way. If we let other threads run while we do that, they may
372 pass by the breakpoint location and miss hitting it. To avoid
373 that, a step-over momentarily stops all threads while LWP is
374 single-stepped by either hardware or software while the breakpoint
375 is temporarily uninserted from the inferior. When the single-step
376 finishes, we reinsert the breakpoint, and let all threads that are
377 supposed to be running, run again. */
378 void start_step_over (lwp_info *lwp);
379
380 /* If there's a step over in progress, wait until all threads stop
381 (that is, until the stepping thread finishes its step), and
382 unsuspend all lwps. The stepping thread ends with its status
383 pending, which is processed later when we get back to processing
384 events. */
385 void complete_ongoing_step_over ();
386
387 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
388 start_step_over, if still there, and delete any single-step
389 	 breakpoints we've set, on non-hardware single-step targets.
390 Return true if step over finished. */
391 bool finish_step_over (lwp_info *lwp);
392
393 /* When we finish a step-over, set threads running again. If there's
394 another thread that may need a step-over, now's the time to start
395 it. Eventually, we'll move all threads past their breakpoints. */
396 void proceed_all_lwps ();
397
398 	 /* The reason we resume in the caller is that we want to be able
399 	 to pass lwp->status_pending as WSTAT, and we need to clear
400 	 status_pending_p before resuming; otherwise, resume_one_lwp
401 	 refuses to resume. */
402 bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);
403
404 /* Move THREAD out of the jump pad. */
405 void move_out_of_jump_pad (thread_info *thread);
406
407 /* Call low_arch_setup on THREAD. */
408 void arch_setup_thread (thread_info *thread);
409
410 #ifdef HAVE_LINUX_USRREGS
411 /* Fetch one register. */
412 void fetch_register (const usrregs_info *usrregs, regcache *regcache,
413 int regno);
414
415 /* Store one register. */
416 void store_register (const usrregs_info *usrregs, regcache *regcache,
417 int regno);
418 #endif
419
420 /* Fetch all registers, or just one, from the child process.
421 If REGNO is -1, do this for all registers, skipping any that are
422 assumed to have been retrieved by regsets_fetch_inferior_registers,
423 unless ALL is non-zero.
424 Otherwise, REGNO specifies which register (so we can save time). */
425 void usr_fetch_inferior_registers (const regs_info *regs_info,
426 regcache *regcache, int regno, int all);
427
428 /* Store our register values back into the inferior.
429 If REGNO is -1, do this for all registers, skipping any that are
430 assumed to have been saved by regsets_store_inferior_registers,
431 unless ALL is non-zero.
432 Otherwise, REGNO specifies which register (so we can save time). */
433 void usr_store_inferior_registers (const regs_info *regs_info,
434 regcache *regcache, int regno, int all);
435
436 /* Return the PC as read from the regcache of LWP, without any
437 adjustment. */
438 CORE_ADDR get_pc (lwp_info *lwp);
439
440 /* Called when the LWP stopped for a signal/trap. If it stopped for a
441 trap check what caused it (breakpoint, watchpoint, trace, etc.),
442 and save the result in the LWP's stop_reason field. If it stopped
443 for a breakpoint, decrement the PC if necessary on the lwp's
444 architecture. Returns true if we now have the LWP's stop PC. */
445 bool save_stop_reason (lwp_info *lwp);
446
447 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
448 SIGNAL is nonzero, give it that signal. */
449 void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
450 siginfo_t *info);
451
452 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
453 disappears while we try to resume it. */
454 void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);
455
456 /* This function is called once per thread. We check the thread's
457 last resume request, which will tell us whether to resume, step, or
458 leave the thread stopped. Any signal the client requested to be
459 delivered has already been enqueued at this point.
460
461 If any thread that GDB wants running is stopped at an internal
462 breakpoint that needs stepping over, we start a step-over operation
463 on that particular thread, and leave all others stopped. */
464 void proceed_one_lwp (thread_info *thread, lwp_info *except);
465
466 /* This function is called once per thread. We check the thread's
467 resume request, which will tell us whether to resume, step, or
468 leave the thread stopped; and what signal, if any, it should be
469 sent.
470
471 For threads which we aren't explicitly told otherwise, we preserve
472 the stepping flag; this is used for stepping over gdbserver-placed
473 breakpoints.
474
475 If pending_flags was set in any thread, we queue any needed
476 signals, since we won't actually resume. We already have a pending
477 event to report, so we don't need to preserve any step requests;
478 they should be re-issued if necessary. */
479 void resume_one_thread (thread_info *thread, bool leave_all_stopped);
480
481 /* Return true if this lwp has an interesting status pending. */
482 bool status_pending_p_callback (thread_info *thread, ptid_t ptid);
483
484 /* Resume LWPs that are currently stopped without any pending status
485 to report, but are resumed from the core's perspective. */
486 void resume_stopped_resumed_lwps (thread_info *thread);
487
488 /* Unsuspend THREAD, except EXCEPT, and proceed. */
489 void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);
490
491 /* Return true if this lwp still has an interesting status pending.
492 If not (e.g., it had stopped for a breakpoint that is gone), return
493 false. */
494 bool thread_still_has_status_pending (thread_info *thread);
495
496 /* Return true if this lwp is to-be-resumed and has an interesting
497 status pending. */
498 bool resume_status_pending (thread_info *thread);
499
500 /* Return true if this lwp that GDB wants running is stopped at an
501 internal breakpoint that we need to step over. It assumes that
502 any required STOP_PC adjustment has already been propagated to
503 the inferior's regcache. */
504 bool thread_needs_step_over (thread_info *thread);
505
506 /* Single step via hardware or software single step.
507 Return 1 if hardware single stepping, 0 if software single stepping
508 or can't single step. */
509 int single_step (lwp_info* lwp);
510
511 /* Return true if THREAD is doing hardware single step. */
512 bool maybe_hw_step (thread_info *thread);
513
514 /* Install breakpoints for software single stepping. */
515 void install_software_single_step_breakpoints (lwp_info *lwp);
516
517 /* Fetch the possibly triggered data watchpoint info and store it in
518 CHILD.
519
520 On some archs, like x86, that use debug registers to set
521 watchpoints, it's possible that the way to know which watched
522 	 address trapped is to check the register that is used to select
523 which address to watch. Problem is, between setting the watchpoint
524 and reading back which data address trapped, the user may change
525 the set of watchpoints, and, as a consequence, GDB changes the
526 debug registers in the inferior. To avoid reading back a stale
527 	 stopped-data-address when that happens, we cache in CHILD the fact
528 that a watchpoint trapped, and the corresponding data address, as
529 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
530 registers meanwhile, we have the cached data we can rely on. */
531 bool check_stopped_by_watchpoint (lwp_info *child);
532
533 /* Convert a native/host siginfo object, into/from the siginfo in the
534 layout of the inferiors' architecture. */
535 void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
536 int direction);
537
538 /* Add a process to the common process list, and set its private
539 data. */
540 process_info *add_linux_process (int pid, int attached);
541
542 /* Add a new thread. */
543 lwp_info *add_lwp (ptid_t ptid);
544
545 /* Delete a thread. */
546 void delete_lwp (lwp_info *lwp);
547
548 public: /* Make this public because it's used from outside. */
549 /* Attach to an inferior process. Returns 0 on success, ERRNO on
550 error. */
551 int attach_lwp (ptid_t ptid);
552
553 private: /* Back to private. */
554 /* Detach from LWP. */
555 void detach_one_lwp (lwp_info *lwp);
556
557 /* Detect zombie thread group leaders, and "exit" them. We can't
558 reap their exits until all other threads in the group have
559 exited. */
560 void check_zombie_leaders ();
561
562 /* Convenience function that is called when the kernel reports an exit
563 event. This decides whether to report the event to GDB as a
564 process exit event, a thread exit event, or to suppress the
565 event. */
566 ptid_t filter_exit_event (lwp_info *event_child,
567 target_waitstatus *ourstatus);
568
569 /* Returns true if THREAD is stopped in a jump pad, and we can't
570 move it out, because we need to report the stop event to GDB. For
571 example, if the user puts a breakpoint in the jump pad, it's
572 because she wants to debug it. */
573 bool stuck_in_jump_pad (thread_info *thread);
574
575 /* Convenience wrapper. Returns information about LWP's fast tracepoint
576 collection status. */
577 fast_tpoint_collect_result linux_fast_tracepoint_collecting
578 (lwp_info *lwp, fast_tpoint_collect_status *status);
579
580 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
581 Fill *SYSNO with the syscall nr trapped. */
582 void get_syscall_trapinfo (lwp_info *lwp, int *sysno);
583
584 /* Returns true if GDB is interested in the event_child syscall.
585 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
586 bool gdb_catch_this_syscall (lwp_info *event_child);
587
588 protected:
589 /* The architecture-specific "low" methods are listed below. */
590
591 /* Architecture-specific setup for the current thread. */
592 virtual void low_arch_setup () = 0;
593
594 /* Return false if we can fetch/store the register, true if we cannot
595 fetch/store the register. */
596 virtual bool low_cannot_fetch_register (int regno) = 0;
597
598 virtual bool low_cannot_store_register (int regno) = 0;
599
600 /* Hook to fetch a register in some non-standard way. Used for
601 example by backends that have read-only registers with hardcoded
602 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
603 	 REGNO was supplied, false if not, and we should fall back to the
604 standard ptrace methods. */
605 virtual bool low_fetch_register (regcache *regcache, int regno);
606
607 /* Return true if breakpoints are supported. Such targets must
608 implement the GET_PC and SET_PC methods. */
609 virtual bool low_supports_breakpoints ();
610
611 virtual CORE_ADDR low_get_pc (regcache *regcache);
612
613 virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);
614
615 /* Find the next possible PCs after the current instruction executes.
616 Targets that override this method should also override
617 'supports_software_single_step' to return true. */
618 virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);
619
620 /* Return true if there is a breakpoint at PC. */
621 virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;
622
623 /* Breakpoint and watchpoint related functions. See target.h for
624 comments. */
625 virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
626 int size, raw_breakpoint *bp);
627
628 virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
629 int size, raw_breakpoint *bp);
630
631 virtual bool low_stopped_by_watchpoint ();
632
633 virtual CORE_ADDR low_stopped_data_address ();
634
635 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
636 for registers smaller than an xfer unit). */
637 virtual void low_collect_ptrace_register (regcache *regcache, int regno,
638 char *buf);
639
640 virtual void low_supply_ptrace_register (regcache *regcache, int regno,
641 const char *buf);
642
643 /* Hook to convert from target format to ptrace format and back.
644 Returns true if any conversion was done; false otherwise.
645 If DIRECTION is 1, then copy from INF to NATIVE.
646 If DIRECTION is 0, copy from NATIVE to INF. */
647 virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
648 int direction);
649
650 /* Hook to call when a new process is created or attached to.
651 If extra per-process architecture-specific data is needed,
652 allocate it here. */
653 virtual arch_process_info *low_new_process ();
654
655 /* Hook to call when a process is being deleted. If extra per-process
656 architecture-specific data is needed, delete it here. */
657 virtual void low_delete_process (arch_process_info *info);
658
659 /* Hook to call when a new thread is detected.
660 If extra per-thread architecture-specific data is needed,
661 allocate it here. */
662 virtual void low_new_thread (lwp_info *);
663
664 /* Hook to call when a thread is being deleted. If extra per-thread
665 architecture-specific data is needed, delete it here. */
666 virtual void low_delete_thread (arch_lwp_info *);
667
668 /* Hook to call, if any, when a new fork is attached. */
669 virtual void low_new_fork (process_info *parent, process_info *child);
670
671 /* Hook to call prior to resuming a thread. */
672 virtual void low_prepare_to_resume (lwp_info *lwp);
673
674 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
675 success, -1 on failure. */
676 virtual int low_get_thread_area (int lwpid, CORE_ADDR *addrp);
677
678 /* Returns true if the low target supports range stepping. */
679 virtual bool low_supports_range_stepping ();
680
681 /* Return true if the target supports catch syscall. Such targets
682 override the low_get_syscall_trapinfo method below. */
683 virtual bool low_supports_catch_syscall ();
684
685 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
686 inferior is stopped due to SYSCALL_SIGTRAP. */
687 virtual void low_get_syscall_trapinfo (regcache *regcache, int *sysno);
688
689 /* How many bytes the PC should be decremented after a break. */
690 virtual int low_decr_pc_after_break ();
691 };
692
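/* Illustrative sketch, not part of this header: an architecture port
   derives from linux_process_target, implements at least the pure
   virtual methods (get_regs_info plus the low_* hooks marked "= 0"
   above), and points the_linux_target, declared just below, at a
   singleton instance. All "example_*" names are hypothetical; see a
   real backend such as linux-x86-low.cc for the genuine article.

     class example_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override
       { return &example_regs_info; }

     protected:
       void low_arch_setup () override;
       bool low_cannot_fetch_register (int regno) override;
       bool low_cannot_store_register (int regno) override;
       bool low_breakpoint_at (CORE_ADDR pc) override;
     };

     static example_target the_example_target;

     // One plausible arrangement: define the extern from this header
     // in the backend's .cc file.
     linux_process_target *the_linux_target = &the_example_target;  */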
693 extern linux_process_target *the_linux_target;
694
695 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
696 #define get_lwp_thread(lwp) ((lwp)->thread)
697
698 /* This struct is recorded in the target_data field of struct thread_info.
699
700 On linux ``all_threads'' is keyed by the LWP ID, which we use as the
701 GDB protocol representation of the thread ID. Threads also have
702 a "process ID" (poorly named) which is (presently) the same as the
703 LWP ID.
704
705 	 There is also ``all_processes'', which is keyed by the "overall process ID",
706 which GNU/Linux calls tgid, "thread group ID". */
707
708 struct lwp_info
709 {
710 /* Backlink to the parent object. */
711 struct thread_info *thread;
712
713 /* If this flag is set, the next SIGSTOP will be ignored (the
714 process will be immediately resumed). This means that either we
715 sent the SIGSTOP to it ourselves and got some other pending event
716 (so the SIGSTOP is still pending), or that we stopped the
717 inferior implicitly via PTRACE_ATTACH and have not waited for it
718 yet. */
719 int stop_expected;
720
721 /* When this is true, we shall not try to resume this thread, even
722 if last_resume_kind isn't resume_stop. */
723 int suspended;
724
725 /* If this flag is set, the lwp is known to be stopped right now (stop
726 event already received in a wait()). */
727 int stopped;
728
729 /* Signal whether we are in a SYSCALL_ENTRY or
730 in a SYSCALL_RETURN event.
731 Values:
732 - TARGET_WAITKIND_SYSCALL_ENTRY
733 - TARGET_WAITKIND_SYSCALL_RETURN */
734 enum target_waitkind syscall_state;
735
736 /* When stopped is set, the last wait status recorded for this lwp. */
737 int last_status;
738
739 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
740 this LWP's last event, to pass to GDB without any further
741 processing. This is used to store extended ptrace event
742 information or exit status until it can be reported to GDB. */
743 struct target_waitstatus waitstatus;
744
745 /* A pointer to the fork child/parent relative. Valid only while
746 the parent fork event is not reported to higher layers. Used to
747 avoid wildcard vCont actions resuming a fork child before GDB is
748 notified about the parent's fork event. */
749 struct lwp_info *fork_relative;
750
751 /* When stopped is set, this is where the lwp last stopped, with
752 decr_pc_after_break already accounted for. If the LWP is
753 running, this is the address at which the lwp was resumed. */
754 CORE_ADDR stop_pc;
755
756 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
757 been reported. */
758 int status_pending_p;
759 int status_pending;
760
761 /* The reason the LWP last stopped, if we need to track it
762 (breakpoint, watchpoint, etc.) */
763 enum target_stop_reason stop_reason;
764
765 /* On architectures where it is possible to know the data address of
766 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
767 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
768 is true. */
769 CORE_ADDR stopped_data_address;
770
771 /* If this is non-zero, it is a breakpoint to be reinserted at our next
772 stop (SIGTRAP stops only). */
773 CORE_ADDR bp_reinsert;
774
775 /* If this flag is set, the last continue operation at the ptrace
776 level on this process was a single-step. */
777 int stepping;
778
779 /* Range to single step within. This is a copy of the step range
780 passed along the last resume request. See 'struct
781 thread_resume'. */
782 CORE_ADDR step_range_start; /* Inclusive */
783 CORE_ADDR step_range_end; /* Exclusive */
784
785 /* If this flag is set, we need to set the event request flags the
786 next time we see this LWP stop. */
787 int must_set_ptrace_flags;
788
789 /* If this is non-zero, it points to a chain of signals which need to
790 be delivered to this process. */
791 struct pending_signals *pending_signals;
792
793 /* A link used when resuming. It is initialized from the resume request,
794 and then processed and cleared in linux_resume_one_lwp. */
795 struct thread_resume *resume;
796
797 	 /* Information about this lwp's fast tracepoint collection status (is it
798 currently stopped in the jump pad, and if so, before or at/after the
799 relocated instruction). Normally, we won't care about this, but we will
800 if a signal arrives to this lwp while it is collecting. */
801 fast_tpoint_collect_result collecting_fast_tracepoint;
802
803 /* If this is non-zero, it points to a chain of signals which need
804 to be reported to GDB. These were deferred because the thread
805 was doing a fast tracepoint collect when they arrived. */
806 struct pending_signals *pending_signals_to_report;
807
808 /* When collecting_fast_tracepoint is first found to be 1, we insert
809 	 an exit-jump-pad-quickly breakpoint. This is it. */
810 struct breakpoint *exit_jump_pad_bkpt;
811
812 #ifdef USE_THREAD_DB
813 int thread_known;
814 /* The thread handle, used for e.g. TLS access. Only valid if
815 THREAD_KNOWN is set. */
816 td_thrhandle_t th;
817
818 /* The pthread_t handle. */
819 thread_t thread_handle;
820 #endif
821
822 /* Arch-specific additions. */
823 struct arch_lwp_info *arch_private;
824 };
825
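/* Illustrative sketch, not part of this header: an lwp_info hangs off
   thread_info::target_data, so code usually hops between the two views
   with the get_thread_lwp / get_lwp_thread macros defined above. A
   minimal (hypothetical) predicate built on the fields above:

     static bool
     example_lwp_is_stepping (thread_info *thread)
     {
       struct lwp_info *lwp = get_thread_lwp (thread);

       // Stopped, and the last ptrace-level resume was a single-step.
       return lwp->stopped && lwp->stepping;
     }  */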
826 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
827
828 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
829 errno). */
830 int linux_attach_lwp (ptid_t ptid);
831
832 struct lwp_info *find_lwp_pid (ptid_t ptid);
833 /* For linux_stop_lwp see nat/linux-nat.h. */
834
835 #ifdef HAVE_LINUX_REGSETS
836 void initialize_regsets_info (struct regsets_info *regsets_info);
837 #endif
838
839 void initialize_low_arch (void);
840
841 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
842 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
843
844 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
845 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
846
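/* Illustrative sketch, not part of this header: backends whose program
   counter matches what these helpers expect can forward their PC
   accessors straight to them. The class name is hypothetical.

     CORE_ADDR
     example_target::low_get_pc (regcache *regcache)
     {
       return linux_get_pc_32bit (regcache);
     }

     void
     example_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
     {
       linux_set_pc_32bit (regcache, pc);
     }  */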
847 /* From thread-db.c */
848 int thread_db_init (void);
849 void thread_db_detach (struct process_info *);
850 void thread_db_mourn (struct process_info *);
851 int thread_db_handle_monitor_command (char *);
852 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
853 CORE_ADDR load_module, CORE_ADDR *address);
854 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
855
856 /* Called from linux-low.c when a clone event is detected. Upon entry,
857 both the clone and the parent should be stopped. This function does
858 	 whatever is required to have the clone under thread_db's control. */
859
860 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
861
862 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
863
864 extern int have_ptrace_getregset;
865
866 /* Search for the value with type MATCH in the auxv vector with
867 entries of length WORDSIZE bytes. If found, store the value in
868 *VALP and return 1. If not found or if there is an error, return
869 0. */
870
871 int linux_get_auxv (int wordsize, CORE_ADDR match,
872 CORE_ADDR *valp);
873
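/* Illustrative sketch, not part of this header: looking up a single
   auxv entry, here AT_PAGESZ, assuming a 64-bit inferior.

     CORE_ADDR pagesize;

     if (linux_get_auxv (8, AT_PAGESZ, &pagesize))
       {
         // pagesize now holds the inferior's page size.
       }  */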
874 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are
875 	 WORDSIZE bytes long. If no entry was found, return zero. */
876
877 CORE_ADDR linux_get_hwcap (int wordsize);
878
879 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are
880 	 WORDSIZE bytes long. If no entry was found, return zero. */
881
882 CORE_ADDR linux_get_hwcap2 (int wordsize);
883
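/* Illustrative sketch, not part of this header: a backend commonly
   probes HWCAP while choosing a target description in low_arch_setup.
   The feature bit and the 64-bit assumption below are made-up
   placeholders.

     void
     example_target::low_arch_setup ()
     {
       const int wordsize = 8;  // auxv entry size; 4 on a 32-bit inferior
       CORE_ADDR hwcap = linux_get_hwcap (wordsize);

       if ((hwcap & EXAMPLE_HWCAP_FEATURE_BIT) != 0)
         {
           // Select the feature-enabled target description here.
         }
     }  */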
884 #endif /* GDBSERVER_LINUX_LOW_H */