gdbserver/linux-low: turn some more static functions into private methods
gdbserver/linux-low.h
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func) (struct regcache *, void *);
38 typedef void (*regset_store_func) (struct regcache *, const void *);
39 enum regset_type {
40 GENERAL_REGS,
41 FP_REGS,
42 EXTENDED_REGS,
43 OPTIONAL_REGS, /* Do not error if the regset cannot be accessed. */
44 };
45
46 /* The arch's regsets array initializer must be terminated with a NULL
47 regset. */
48 #define NULL_REGSET \
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
50
51 struct regset_info
52 {
53 int get_request, set_request;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
56 int nt_type;
57 int size;
58 enum regset_type type;
59 regset_fill_func fill_function;
60 regset_store_func store_function;
61 };
62
63 /* Aggregation of all the supported regsets of a given
64 architecture/mode. */
65
66 struct regsets_info
67 {
68 /* The regsets array. */
69 struct regset_info *regsets;
70
71 /* The number of regsets in the REGSETS array. */
72 int num_regsets;
73
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets;
80 };
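/* For illustration only (hypothetical names, assuming an x86-64 style
   user_regs_struct): a port might define its regsets and the
   aggregating regsets_info roughly as below, terminating the array
   with NULL_REGSET as required.  NUM_REGSETS is left as 0 here and is
   counted at startup by initialize_regsets_info; DISABLED_REGSETS
   starts out NULL and is allocated lazily.

     static struct regset_info my_regsets[] =
       {
         { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
           sizeof (struct user_regs_struct), GENERAL_REGS,
           my_fill_gregset, my_store_gregset },
         NULL_REGSET
       };

     static struct regsets_info my_regsets_info =
       { my_regsets, 0, NULL };  */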
81
82 #endif
83
84 /* Mapping between the general-purpose registers in `struct user'
85 format and GDB's register array layout. */
86
87 struct usrregs_info
88 {
89 /* The number of registers accessible. */
90 int num_regs;
91
92 /* The registers map. */
93 int *regmap;
94 };
95
96 /* All info needed to access an architecture/mode's registers. */
97
98 struct regs_info
99 {
100 /* Regset support bitmap: 1 for registers that are transferred as a part
101 of a regset, 0 for ones that need to be handled individually. This
102 can be NULL if all registers are transferred with regsets or regsets
103 are not supported. */
104 unsigned char *regset_bitmap;
105
106 /* Info used when accessing registers with PTRACE_PEEKUSER /
107 PTRACE_POKEUSER. This can be NULL if all registers are
108 transferred with regsets. */
109 struct usrregs_info *usrregs;
110
111 #ifdef HAVE_LINUX_REGSETS
112 /* Info used when accessing registers with regsets. */
113 struct regsets_info *regsets_info;
114 #endif
115 };
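/* For illustration only: on a port where all registers are transferred
   via regsets, the aggregation above might be instantiated as below
   (my_regsets_info is hypothetical), with NULL for both the regset
   bitmap and the usrregs info.  The port's get_regs_info method
   (declared further down) would then return &my_regs_info.

     static struct regs_info my_regs_info =
       { NULL, NULL, &my_regsets_info };  */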
116
117 struct process_info_private
118 {
119 /* Arch-specific additions. */
120 struct arch_process_info *arch_private;
121
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db *thread_db;
125
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
127 CORE_ADDR r_debug;
128 };
129
130 struct lwp_info;
131
132 struct linux_target_ops
133 {
134 CORE_ADDR (*get_pc) (struct regcache *regcache);
135 void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);
136
137 /* See target.h for details. */
138 int (*breakpoint_kind_from_pc) (CORE_ADDR *pcptr);
139
140 /* See target.h for details. */
141 const gdb_byte *(*sw_breakpoint_from_kind) (int kind, int *size);
142
143 /* Find the next possible PCs after the current instruction executes. */
144 std::vector<CORE_ADDR> (*get_next_pcs) (struct regcache *regcache);
145
146 int decr_pc_after_break;
147 int (*breakpoint_at) (CORE_ADDR pc);
148
149 /* Breakpoint and watchpoint related functions. See target.h for
150 comments. */
151 int (*supports_z_point_type) (char z_type);
152 int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
153 int size, struct raw_breakpoint *bp);
154 int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
155 int size, struct raw_breakpoint *bp);
156
157 int (*stopped_by_watchpoint) (void);
158 CORE_ADDR (*stopped_data_address) (void);
159
160 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
161 for registers smaller than an xfer unit). */
162 void (*collect_ptrace_register) (struct regcache *regcache,
163 int regno, char *buf);
164 void (*supply_ptrace_register) (struct regcache *regcache,
165 int regno, const char *buf);
166
167 /* Hook to convert from target format to ptrace format and back.
168 Returns true if any conversion was done; false otherwise.
169 If DIRECTION is 1, then copy from INF to NATIVE.
170 If DIRECTION is 0, copy from NATIVE to INF. */
171 int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);
172
173 /* Hook to call when a new process is created or attached to.
174 If extra per-process architecture-specific data is needed,
175 allocate it here. */
176 struct arch_process_info * (*new_process) (void);
177
178 /* Hook to call when a process is being deleted. If extra per-process
179 architecture-specific data is needed, delete it here. */
180 void (*delete_process) (struct arch_process_info *info);
181
182 /* Hook to call when a new thread is detected.
183 If extra per-thread architecture-specific data is needed,
184 allocate it here. */
185 void (*new_thread) (struct lwp_info *);
186
187 /* Hook to call when a thread is being deleted. If extra per-thread
188 architecture-specific data is needed, delete it here. */
189 void (*delete_thread) (struct arch_lwp_info *);
190
191 /* Hook to call, if any, when a new fork is attached. */
192 void (*new_fork) (struct process_info *parent, struct process_info *child);
193
194 /* Hook to call prior to resuming a thread. */
195 void (*prepare_to_resume) (struct lwp_info *);
196
197 /* Hook to support target specific qSupported. */
198 void (*process_qsupported) (char **, int count);
199
200 /* Returns true if the low target supports tracepoints. */
201 int (*supports_tracepoints) (void);
202
203 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
204 success, -1 on failure. */
205 int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);
206
207 /* Install a fast tracepoint jump pad. See target.h for
208 comments. */
209 int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
210 CORE_ADDR collector,
211 CORE_ADDR lockaddr,
212 ULONGEST orig_size,
213 CORE_ADDR *jump_entry,
214 CORE_ADDR *trampoline,
215 ULONGEST *trampoline_size,
216 unsigned char *jjump_pad_insn,
217 ULONGEST *jjump_pad_insn_size,
218 CORE_ADDR *adjusted_insn_addr,
219 CORE_ADDR *adjusted_insn_addr_end,
220 char *err);
221
222 /* Return the bytecode operations vector for the current inferior.
223 Returns NULL if bytecode compilation is not supported. */
224 struct emit_ops *(*emit_ops) (void);
225
226 /* Return the minimum length of an instruction that can be safely overwritten
227 for use as a fast tracepoint. */
228 int (*get_min_fast_tracepoint_insn_len) (void);
229
230 /* Returns true if the low target supports range stepping. */
231 int (*supports_range_stepping) (void);
232
233 /* See target.h. */
234 int (*breakpoint_kind_from_current_state) (CORE_ADDR *pcptr);
235
236 /* See target.h. */
237 int (*supports_hardware_single_step) (void);
238
239 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
240 inferior is stopped due to SYSCALL_SIGTRAP. */
241 void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);
242
243 /* See target.h. */
244 int (*get_ipa_tdesc_idx) (void);
245 };
246
247 extern struct linux_target_ops the_low_target;
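/* For illustration only: each linux-<arch>-low.c defines the_low_target,
   filling in the hooks it implements; members not listed in the
   initializer are zero/NULL-initialized.  The function names below are
   hypothetical.

     struct linux_target_ops the_low_target =
       {
         my_get_pc,
         my_set_pc,
         my_breakpoint_kind_from_pc,
         my_sw_breakpoint_from_kind,
       };  */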
248
249 /* Target ops definitions for a Linux target. */
250
251 class linux_process_target : public process_stratum_target
252 {
253 public:
254
255 int create_inferior (const char *program,
256 const std::vector<char *> &program_args) override;
257
258 void post_create_inferior () override;
259
260 int attach (unsigned long pid) override;
261
262 int kill (process_info *proc) override;
263
264 int detach (process_info *proc) override;
265
266 void mourn (process_info *proc) override;
267
268 void join (int pid) override;
269
270 bool thread_alive (ptid_t pid) override;
271
272 void resume (thread_resume *resume_info, size_t n) override;
273
274 ptid_t wait (ptid_t ptid, target_waitstatus *status,
275 int options) override;
276
277 void fetch_registers (regcache *regcache, int regno) override;
278
279 void store_registers (regcache *regcache, int regno) override;
280
281 int prepare_to_access_memory () override;
282
283 void done_accessing_memory () override;
284
285 int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
286 int len) override;
287
288 int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
289 int len) override;
290
291 void look_up_symbols () override;
292
293 void request_interrupt () override;
294
295 bool supports_read_auxv () override;
296
297 int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
298 unsigned int len) override;
299
300 bool supports_z_point_type (char z_type) override;
301
302 int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
303 int size, raw_breakpoint *bp) override;
304
305 int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
306 int size, raw_breakpoint *bp) override;
307
308 bool stopped_by_sw_breakpoint () override;
309
310 bool supports_stopped_by_sw_breakpoint () override;
311
312 bool stopped_by_hw_breakpoint () override;
313
314 bool supports_stopped_by_hw_breakpoint () override;
315
316 bool supports_hardware_single_step () override;
317
318 bool stopped_by_watchpoint () override;
319
320 CORE_ADDR stopped_data_address () override;
321
322 bool supports_read_offsets () override;
323
324 int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;
325
326 bool supports_get_tls_address () override;
327
328 int get_tls_address (thread_info *thread, CORE_ADDR offset,
329 CORE_ADDR load_module, CORE_ADDR *address) override;
330
331 bool supports_qxfer_osdata () override;
332
333 int qxfer_osdata (const char *annex, unsigned char *readbuf,
334 unsigned const char *writebuf,
335 CORE_ADDR offset, int len) override;
336
337 bool supports_qxfer_siginfo () override;
338
339 int qxfer_siginfo (const char *annex, unsigned char *readbuf,
340 unsigned const char *writebuf,
341 CORE_ADDR offset, int len) override;
342
343 bool supports_non_stop () override;
344
345 bool async (bool enable) override;
346
347 int start_non_stop (bool enable) override;
348
349 bool supports_multi_process () override;
350
351 bool supports_fork_events () override;
352
353 bool supports_vfork_events () override;
354
355 bool supports_exec_events () override;
356
357 void handle_new_gdb_connection () override;
358
359 int handle_monitor_command (char *mon) override;
360
361 int core_of_thread (ptid_t ptid) override;
362
363 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
364 bool supports_read_loadmap () override;
365
366 int read_loadmap (const char *annex, CORE_ADDR offset,
367 unsigned char *myaddr, unsigned int len) override;
368 #endif
369
370 void process_qsupported (char **features, int count) override;
371
372 bool supports_tracepoints () override;
373
374 CORE_ADDR read_pc (regcache *regcache) override;
375
376 void write_pc (regcache *regcache, CORE_ADDR pc) override;
377
378 bool supports_thread_stopped () override;
379
380 bool thread_stopped (thread_info *thread) override;
381
382 void pause_all (bool freeze) override;
383
384 void unpause_all (bool unfreeze) override;
385
386 void stabilize_threads () override;
387
388 bool supports_fast_tracepoints () override;
389
390 int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
391 CORE_ADDR tpaddr,
392 CORE_ADDR collector,
393 CORE_ADDR lockaddr,
394 ULONGEST orig_size,
395 CORE_ADDR *jump_entry,
396 CORE_ADDR *trampoline,
397 ULONGEST *trampoline_size,
398 unsigned char *jjump_pad_insn,
399 ULONGEST *jjump_pad_insn_size,
400 CORE_ADDR *adjusted_insn_addr,
401 CORE_ADDR *adjusted_insn_addr_end,
402 char *err) override;
403
404 int get_min_fast_tracepoint_insn_len () override;
405
406 struct emit_ops *emit_ops () override;
407
408 bool supports_disable_randomization () override;
409
410 bool supports_qxfer_libraries_svr4 () override;
411
412 int qxfer_libraries_svr4 (const char *annex,
413 unsigned char *readbuf,
414 unsigned const char *writebuf,
415 CORE_ADDR offset, int len) override;
416
417 bool supports_agent () override;
418
419 #ifdef HAVE_LINUX_BTRACE
420 btrace_target_info *enable_btrace (ptid_t ptid,
421 const btrace_config *conf) override;
422
423 int disable_btrace (btrace_target_info *tinfo) override;
424
425 int read_btrace (btrace_target_info *tinfo, buffer *buf,
426 enum btrace_read_type type) override;
427
428 int read_btrace_conf (const btrace_target_info *tinfo,
429 buffer *buf) override;
430 #endif
431
432 bool supports_range_stepping () override;
433
434 bool supports_pid_to_exec_file () override;
435
436 char *pid_to_exec_file (int pid) override;
437
438 bool supports_multifs () override;
439
440 int multifs_open (int pid, const char *filename, int flags,
441 mode_t mode) override;
442
443 int multifs_unlink (int pid, const char *filename) override;
444
445 ssize_t multifs_readlink (int pid, const char *filename, char *buf,
446 size_t bufsiz) override;
447
448 int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;
449
450 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
451
452 int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;
453
454 const char *thread_name (ptid_t thread) override;
455
456 #if USE_THREAD_DB
457 bool thread_handle (ptid_t ptid, gdb_byte **handle,
458 int *handle_len) override;
459 #endif
460
461 bool supports_software_single_step () override;
462
463 bool supports_catch_syscall () override;
464
465 int get_ipa_tdesc_idx () override;
466
467 /* Return the information to access registers. This has public
468 visibility because proc-service uses it. */
469 virtual const regs_info *get_regs_info () = 0;
470
471 private:
472
473 /* Handle a GNU/Linux extended wait response. If we see a clone,
474 fork, or vfork event, we need to add the new LWP to our list
475 (and return 0 so as not to report the trap to higher layers).
476 If we see an exec event, we will modify ORIG_EVENT_LWP to point
477 to a new LWP representing the new program. */
478 int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);
479
480 /* Do low-level handling of the event, and check if we should go on
481 and pass it to caller code. Return the affected lwp if we are, or
482 NULL otherwise. */
483 lwp_info *filter_event (int lwpid, int wstat);
484
485 /* Wait for an event from child(ren) WAIT_PTID, and return any that
486 match FILTER_PTID (leaving others pending). The PTIDs can be:
487 minus_one_ptid, to specify any child; a pid PTID, specifying all
488 lwps of a thread group; or a PTID representing a single lwp. Store
489 the stop status through the status pointer WSTAT. OPTIONS is
490 passed to the waitpid call. Return 0 if no event was found and
491 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
492 were found. Return the PID of the stopped child otherwise. */
493 int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
494 int *wstatp, int options);
495
496 /* Wait for an event from child(ren) PTID. PTIDs can be:
497 minus_one_ptid, to specify any child; a pid PTID, specifying all
498 lwps of a thread group; or a PTID representing a single lwp. Store
499 the stop status through the status pointer WSTAT. OPTIONS is
500 passed to the waitpid call. Return 0 if no event was found and
501 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
502 were found. Return the PID of the stopped child otherwise. */
503 int wait_for_event (ptid_t ptid, int *wstatp, int options);
504
505 /* Wait for all children to stop for the SIGSTOPs we just queued. */
506 void wait_for_sigstop ();
507
508 /* Wait for process, returns status. */
509 ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
510 int target_options);
511
512 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
513 If SUSPEND, then also increase the suspend count of every LWP,
514 except EXCEPT. */
515 void stop_all_lwps (int suspend, lwp_info *except);
516
517 /* Stopped LWPs that the client wanted to be running, that don't have
518 pending statuses, are set to run again, except for EXCEPT, if not
519 NULL. This undoes a stop_all_lwps call. */
520 void unstop_all_lwps (int unsuspend, lwp_info *except);
521
522 /* Start a step-over operation on LWP. When LWP is stopped at a
523 breakpoint, to make progress, we need to move the breakpoint out
524 of the way. If we let other threads run while we do that, they may
525 pass by the breakpoint location and miss hitting it. To avoid
526 that, a step-over momentarily stops all threads while LWP is
527 single-stepped by either hardware or software while the breakpoint
528 is temporarily uninserted from the inferior. When the single-step
529 finishes, we reinsert the breakpoint, and let all threads that are
530 supposed to be running, run again. */
531 void start_step_over (lwp_info *lwp);
532
533 /* If there's a step over in progress, wait until all threads stop
534 (that is, until the stepping thread finishes its step), and
535 unsuspend all lwps. The stepping thread ends with its status
536 pending, which is processed later when we get back to processing
537 events. */
538 void complete_ongoing_step_over ();
539
540 /* When we finish a step-over, set threads running again. If there's
541 another thread that may need a step-over, now's the time to start
542 it. Eventually, we'll move all threads past their breakpoints. */
543 void proceed_all_lwps ();
544
545 /* The reason we resume in the caller is that we want to be able
546 to pass lwp->status_pending as WSTAT, and we need to clear
547 status_pending_p before resuming; otherwise, resume_one_lwp
548 refuses to resume. */
549 bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);
550
551 /* Move THREAD out of the jump pad. */
552 void move_out_of_jump_pad (thread_info *thread);
553
554 /* Call low_arch_setup on THREAD. */
555 void arch_setup_thread (thread_info *thread);
556
557 #ifdef HAVE_LINUX_USRREGS
558 /* Fetch one register. */
559 void fetch_register (const usrregs_info *usrregs, regcache *regcache,
560 int regno);
561
562 /* Store one register. */
563 void store_register (const usrregs_info *usrregs, regcache *regcache,
564 int regno);
565 #endif
566
567 /* Fetch all registers, or just one, from the child process.
568 If REGNO is -1, do this for all registers, skipping any that are
569 assumed to have been retrieved by regsets_fetch_inferior_registers,
570 unless ALL is non-zero.
571 Otherwise, REGNO specifies which register (so we can save time). */
572 void usr_fetch_inferior_registers (const regs_info *regs_info,
573 regcache *regcache, int regno, int all);
574
575 /* Store our register values back into the inferior.
576 If REGNO is -1, do this for all registers, skipping any that are
577 assumed to have been saved by regsets_store_inferior_registers,
578 unless ALL is non-zero.
579 Otherwise, REGNO specifies which register (so we can save time). */
580 void usr_store_inferior_registers (const regs_info *regs_info,
581 regcache *regcache, int regno, int all);
582
583 /* Return the PC as read from the regcache of LWP, without any
584 adjustment. */
585 CORE_ADDR get_pc (lwp_info *lwp);
586
587 /* Called when the LWP stopped for a signal/trap. If it stopped for a
588 trap check what caused it (breakpoint, watchpoint, trace, etc.),
589 and save the result in the LWP's stop_reason field. If it stopped
590 for a breakpoint, decrement the PC if necessary on the lwp's
591 architecture. Returns true if we now have the LWP's stop PC. */
592 bool save_stop_reason (lwp_info *lwp);
593
594 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
595 SIGNAL is nonzero, give it that signal. */
596 void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
597 siginfo_t *info);
598
599 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
600 disappears while we try to resume it. */
601 void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);
602
603 /* This function is called once per thread. We check the thread's
604 last resume request, which will tell us whether to resume, step, or
605 leave the thread stopped. Any signal the client requested to be
606 delivered has already been enqueued at this point.
607
608 If any thread that GDB wants running is stopped at an internal
609 breakpoint that needs stepping over, we start a step-over operation
610 on that particular thread, and leave all others stopped. */
611 void proceed_one_lwp (thread_info *thread, lwp_info *except);
612
613 /* This function is called once per thread. We check the thread's
614 resume request, which will tell us whether to resume, step, or
615 leave the thread stopped; and what signal, if any, it should be
616 sent.
617
618 For threads which we aren't explicitly told otherwise, we preserve
619 the stepping flag; this is used for stepping over gdbserver-placed
620 breakpoints.
621
622 If pending_flags was set in any thread, we queue any needed
623 signals, since we won't actually resume. We already have a pending
624 event to report, so we don't need to preserve any step requests;
625 they should be re-issued if necessary. */
626 void resume_one_thread (thread_info *thread, bool leave_all_stopped);
627
628 /* Return true if this lwp has an interesting status pending. */
629 bool status_pending_p_callback (thread_info *thread, ptid_t ptid);
630
631 /* Resume LWPs that are currently stopped without any pending status
632 to report, but are resumed from the core's perspective. */
633 void resume_stopped_resumed_lwps (thread_info *thread);
634
635 /* Unsuspend THREAD, except EXCEPT, and proceed. */
636 void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);
637
638 /* Return true if this lwp still has an interesting status pending.
639 If not (e.g., it had stopped for a breakpoint that is gone), return
640 false. */
641 bool thread_still_has_status_pending (thread_info *thread);
642
643 /* Return true if this lwp is to-be-resumed and has an interesting
644 status pending. */
645 bool resume_status_pending (thread_info *thread);
646
647 /* Return true if this lwp that GDB wants running is stopped at an
648 internal breakpoint that we need to step over. It assumes that
649 any required STOP_PC adjustment has already been propagated to
650 the inferior's regcache. */
651 bool thread_needs_step_over (thread_info *thread);
652
653 /* Single step via hardware or software single step.
654 Return 1 if hardware single stepping, 0 if software single stepping
655 or can't single step. */
656 int single_step (lwp_info* lwp);
657
658 /* Install breakpoints for software single stepping. */
659 void install_software_single_step_breakpoints (lwp_info *lwp);
660
661 protected:
662 /* The architecture-specific "low" methods are listed below. */
663
664 /* Architecture-specific setup for the current thread. */
665 virtual void low_arch_setup () = 0;
666
667 /* Return false if we can fetch/store the register, true if we cannot
668 fetch/store the register. */
669 virtual bool low_cannot_fetch_register (int regno) = 0;
670
671 virtual bool low_cannot_store_register (int regno) = 0;
672
673 /* Hook to fetch a register in some non-standard way. Used for
674 example by backends that have read-only registers with hardcoded
675 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
676 REGNO was supplied, false if not, and we should fallback to the
677 standard ptrace methods. */
678 virtual bool low_fetch_register (regcache *regcache, int regno);
679 };
680
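/* For illustration only: an architecture port derives from
   linux_process_target and implements at least the pure virtual
   methods.  All names below are hypothetical; low_arch_setup would
   normally select the target description for the current inferior.

     class my_arch_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override
       { return &my_regs_info; }

     protected:
       void low_arch_setup () override
       { }

       bool low_cannot_fetch_register (int regno) override
       { return false; }

       bool low_cannot_store_register (int regno) override
       { return false; }
     };  */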
681 extern linux_process_target *the_linux_target;
682
683 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
684 #define get_lwp_thread(lwp) ((lwp)->thread)
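/* E.g. (illustrative), given a thread_info *thr:
     struct lwp_info *lwp = get_thread_lwp (thr);
   and get_lwp_thread (lwp) yields THR back.  */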
685
686 /* This struct is recorded in the target_data field of struct thread_info.
687
688 On GNU/Linux ``all_threads'' is keyed by the LWP ID, which we use as the
689 GDB protocol representation of the thread ID. Threads also have
690 a "process ID" (poorly named) which is (presently) the same as the
691 LWP ID.
692
693 There is also ``all_processes'', which is keyed by the "overall process ID",
694 which GNU/Linux calls tgid, "thread group ID". */
695
696 struct lwp_info
697 {
698 /* Backlink to the parent object. */
699 struct thread_info *thread;
700
701 /* If this flag is set, the next SIGSTOP will be ignored (the
702 process will be immediately resumed). This means that either we
703 sent the SIGSTOP to it ourselves and got some other pending event
704 (so the SIGSTOP is still pending), or that we stopped the
705 inferior implicitly via PTRACE_ATTACH and have not waited for it
706 yet. */
707 int stop_expected;
708
709 /* When this is true, we shall not try to resume this thread, even
710 if last_resume_kind isn't resume_stop. */
711 int suspended;
712
713 /* If this flag is set, the lwp is known to be stopped right now (stop
714 event already received in a wait()). */
715 int stopped;
716
717 /* Signal whether we are in a SYSCALL_ENTRY or
718 in a SYSCALL_RETURN event.
719 Values:
720 - TARGET_WAITKIND_SYSCALL_ENTRY
721 - TARGET_WAITKIND_SYSCALL_RETURN */
722 enum target_waitkind syscall_state;
723
724 /* When stopped is set, the last wait status recorded for this lwp. */
725 int last_status;
726
727 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
728 this LWP's last event, to pass to GDB without any further
729 processing. This is used to store extended ptrace event
730 information or exit status until it can be reported to GDB. */
731 struct target_waitstatus waitstatus;
732
733 /* A pointer to the fork child/parent relative. Valid only while
734 the parent fork event is not reported to higher layers. Used to
735 avoid wildcard vCont actions resuming a fork child before GDB is
736 notified about the parent's fork event. */
737 struct lwp_info *fork_relative;
738
739 /* When stopped is set, this is where the lwp last stopped, with
740 decr_pc_after_break already accounted for. If the LWP is
741 running, this is the address at which the lwp was resumed. */
742 CORE_ADDR stop_pc;
743
744 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
745 been reported. */
746 int status_pending_p;
747 int status_pending;
748
749 /* The reason the LWP last stopped, if we need to track it
750 (breakpoint, watchpoint, etc.) */
751 enum target_stop_reason stop_reason;
752
753 /* On architectures where it is possible to know the data address of
754 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
755 contains that data address. Only valid if STOPPED_BY_WATCHPOINT
756 is true. */
757 CORE_ADDR stopped_data_address;
758
759 /* If this is non-zero, it is a breakpoint to be reinserted at our next
760 stop (SIGTRAP stops only). */
761 CORE_ADDR bp_reinsert;
762
763 /* If this flag is set, the last continue operation at the ptrace
764 level on this process was a single-step. */
765 int stepping;
766
767 /* Range to single step within. This is a copy of the step range
768 passed along the last resume request. See 'struct
769 thread_resume'. */
770 CORE_ADDR step_range_start; /* Inclusive */
771 CORE_ADDR step_range_end; /* Exclusive */
772
773 /* If this flag is set, we need to set the event request flags the
774 next time we see this LWP stop. */
775 int must_set_ptrace_flags;
776
777 /* If this is non-zero, it points to a chain of signals which need to
778 be delivered to this process. */
779 struct pending_signals *pending_signals;
780
781 /* A link used when resuming. It is initialized from the resume request,
782 and then processed and cleared in resume_one_lwp. */
783 struct thread_resume *resume;
784
785 /* Information about this lwp's fast tracepoint collection status (is it
786 currently stopped in the jump pad, and if so, before or at/after the
787 relocated instruction). Normally, we won't care about this, but we will
788 if a signal arrives to this lwp while it is collecting. */
789 fast_tpoint_collect_result collecting_fast_tracepoint;
790
791 /* If this is non-zero, it points to a chain of signals which need
792 to be reported to GDB. These were deferred because the thread
793 was doing a fast tracepoint collect when they arrived. */
794 struct pending_signals *pending_signals_to_report;
795
796 /* When collecting_fast_tracepoint is first found to be 1, we insert
797 an exit-jump-pad-quickly breakpoint. This is it. */
798 struct breakpoint *exit_jump_pad_bkpt;
799
800 #ifdef USE_THREAD_DB
801 int thread_known;
802 /* The thread handle, used for e.g. TLS access. Only valid if
803 THREAD_KNOWN is set. */
804 td_thrhandle_t th;
805
806 /* The pthread_t handle. */
807 thread_t thread_handle;
808 #endif
809
810 /* Arch-specific additions. */
811 struct arch_lwp_info *arch_private;
812 };
813
814 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
815
816 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
817 errno). */
818 int linux_attach_lwp (ptid_t ptid);
819
820 struct lwp_info *find_lwp_pid (ptid_t ptid);
821 /* For linux_stop_lwp see nat/linux-nat.h. */
822
823 #ifdef HAVE_LINUX_REGSETS
824 void initialize_regsets_info (struct regsets_info *regsets_info);
825 #endif
826
827 void initialize_low_arch (void);
828
829 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
830 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
831
832 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
833 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
834
835 /* From thread-db.c */
836 int thread_db_init (void);
837 void thread_db_detach (struct process_info *);
838 void thread_db_mourn (struct process_info *);
839 int thread_db_handle_monitor_command (char *);
840 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
841 CORE_ADDR load_module, CORE_ADDR *address);
842 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
843
844 /* Called from linux-low.c when a clone event is detected. Upon entry,
845 both the clone and the parent should be stopped. This function does
846 whatever is required to have the clone under thread_db's control. */
847
848 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
849
850 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
851
852 extern int have_ptrace_getregset;
853
854 /* Search for the value with type MATCH in the auxv vector with
855 entries of length WORDSIZE bytes. If found, store the value in
856 *VALP and return 1. If not found or if there is an error, return
857 0. */
858
859 int linux_get_auxv (int wordsize, CORE_ADDR match,
860 CORE_ADDR *valp);
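/* Example (illustrative), reading AT_PHDR (from <elf.h>) on a 64-bit
   target, hence a wordsize of 8:

     CORE_ADDR phdr;
     if (linux_get_auxv (8, AT_PHDR, &phdr))
       use_phdr (phdr);

   use_phdr stands in for whatever the caller does with the value.  */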
861
862 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are
863 WORDSIZE bytes long. If no entry was found, return zero. */
864
865 CORE_ADDR linux_get_hwcap (int wordsize);
866
867 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are
868 WORDSIZE bytes long. If no entry was found, return zero. */
869
870 CORE_ADDR linux_get_hwcap2 (int wordsize);
871
872 #endif /* GDBSERVER_LINUX_LOW_H */