gdbserver/linux-low: turn 'regs_info' into a method
/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};

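/* As an illustration only (the `foo_*' names below are hypothetical, not part
   of this interface): a port's linux-foo-low.cc typically defines its regsets
   array along these lines, terminated with NULL_REGSET as required above:

     static void foo_fill_gregset (struct regcache *regcache, void *buf);
     static void foo_store_gregset (struct regcache *regcache, const void *buf);

     static struct regset_info foo_regsets[] =
       {
         { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
           sizeof (foo_elf_gregset_t), GENERAL_REGS,
           foo_fill_gregset, foo_store_gregset },
         NULL_REGSET
       };

   initialize_regsets_info (declared at the bottom of this file) then counts
   the entries up to the NULL_REGSET terminator.  */
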
/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};

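/* Illustrative sketch only (the `foo_*' names are hypothetical): a port
   usually ties the structures above together at file scope in its
   linux-foo-low.cc, and its linux_process_target subclass returns the result
   from get_regs_info (see the class further down):

     static struct usrregs_info foo_usrregs_info =
       {
         FOO_NUM_REGS,       // number of entries in foo_regmap
         foo_regmap,         // offsets into `struct user', indexed by regnum
       };

     static struct regsets_info foo_regsets_info =
       {
         foo_regsets,        // NULL_REGSET-terminated array (see above)
         0,                  // num_regsets, computed by initialize_regsets_info
         NULL,               // disabled_regsets, allocated lazily
       };

     static struct regs_info foo_regs_info =
       {
         NULL,               // regset_bitmap: all registers covered by regsets
         &foo_usrregs_info,
         &foo_regsets_info,  // only present if HAVE_LINUX_REGSETS is defined
       };
*/
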
struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Return 0 if we can fetch/store the register, 1 if we cannot
     fetch/store the register.  */
  int (*cannot_fetch_register) (int);
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, in which case we should fall
     back to the standard ptrace methods.  */
  int (*fetch_register) (struct regcache *regcache, int regno);

  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);

  /* See target.h for details.  */
  int (*breakpoint_kind_from_pc) (CORE_ADDR *pcptr);

  /* See target.h for details.  */
  const gdb_byte *(*sw_breakpoint_from_kind) (int kind, int *size);

  /* Find the next possible PCs after the current instruction executes.  */
  std::vector<CORE_ADDR> (*get_next_pcs) (struct regcache *regcache);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*supports_z_point_type) (char z_type);
  int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);
  int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);

  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
                                   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
                                  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  void (*delete_process) (struct arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  void (*new_thread) (struct lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  void (*delete_thread) (struct arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  void (*new_fork) (struct process_info *parent, struct process_info *child);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (char **, int count);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           CORE_ADDR *trampoline,
                                           ULONGEST *trampoline_size,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end,
                                           char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely
     overwritten for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*breakpoint_kind_from_current_state) (CORE_ADDR *pcptr);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;

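/* Purely as a sketch (the `foo_*' names are hypothetical, and most hooks are
   omitted): each port's linux-foo-low.cc provides the single `the_low_target'
   instance declared above, along the lines of:

     struct linux_target_ops the_low_target =
       {
         foo_cannot_fetch_register,
         foo_cannot_store_register,
         NULL,                     // fetch_register: no special handling
         linux_get_pc_64bit,       // get_pc, generic helper from this file
         linux_set_pc_64bit,       // set_pc
         foo_breakpoint_kind_from_pc,
         foo_sw_breakpoint_from_kind,
         // Hooks not named here are value-initialized to zero/NULL by the
         // aggregate initializer; a real port fills in every hook it supports.
       };
*/
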
/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
                       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
               int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
                   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                 unsigned int len) override;

  bool supports_z_point_type (char z_type) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
                       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf,
                    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
                     unsigned const char *writebuf,
                     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len) override;
#endif

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                        CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
                            unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
                                     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
                   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
                        buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
                    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
                            size_t bufsiz) override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
                      int *handle_len) override;
#endif

  bool supports_software_single_step () override;

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected lwp if we should,
     or NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
                 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP is stopped at a
     breakpoint, we need to get the breakpoint out of the way in order
     to make progress.  If we let other threads run while we do that,
     they may pass by the breakpoint location and miss hitting it.  To
     avoid that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able to
     pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming, otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;
};

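/* A minimal sketch of how an architecture plugs into the class above
   (hypothetical `foo' port; only the pure virtual methods are shown, the
   other methods are inherited from linux_process_target):

     class foo_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override
       { return &foo_regs_info; }

     protected:
       void low_arch_setup () override
       { current_process ()->tdesc = foo_read_description (); }
     };

     static foo_target the_foo_target;

   The port then points the global declared below at that instance:

     linux_process_target *the_linux_target = &the_foo_target;
*/
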
extern linux_process_target *the_linux_target;

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)

/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;	/* Inclusive */
  CORE_ADDR step_range_end;	/* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
                    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
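
/* Usage sketch, for illustration only (HWCAP_SVE merely stands in for
   whichever capability bit a port cares about): a port's low_arch_setup
   typically probes the inferior with these helpers before choosing a target
   description, along the lines of

     CORE_ADDR hwcap = linux_get_hwcap (8);	// 8-byte auxv entries
     bool sve_p = (hwcap & HWCAP_SVE) != 0;

   and then selects the matching tdesc.  */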

#endif /* GDBSERVER_LINUX_LOW_H */