gdbserver/linux-low: turn 'process_qsupported' into a method
deliverable/binutils-gdb.git: gdbserver/linux-low.h
/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif
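
/* For illustration only: a hypothetical architecture might describe a
   single general-purpose regset along the following lines (every
   "hypo_" name below is invented for this sketch and is not defined by
   this header or by any real port):

     static void hypo_fill_gregset (struct regcache *regcache, void *buf);
     static void hypo_store_gregset (struct regcache *regcache, const void *buf);

     static struct regset_info hypo_regsets[] =
       {
         { PTRACE_GETREGS, PTRACE_SETREGS, 0, 64,
           GENERAL_REGS, hypo_fill_gregset, hypo_store_gregset },
         NULL_REGSET
       };

   The fill function copies register values out of the regcache into the
   ptrace buffer; the store function copies them back from the buffer
   into the regcache.  The trailing NULL_REGSET entry terminates the
   array, as the comment above requires.  */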

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
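
/* For illustration only: a hypothetical port might tie the structures
   above together roughly as follows (the "hypo_" names are invented for
   this sketch; hypo_regsets is the array shown earlier, and the zero
   num_regsets and NULL disabled_regsets fields are assumed to be filled
   in later by the generic code):

     static int hypo_regmap[] = { 0, 4, 8, 12 };

     static struct usrregs_info hypo_usrregs_info = { 4, hypo_regmap };

     static struct regsets_info hypo_regsets_info = { hypo_regsets, 0, NULL };

     static struct regs_info hypo_regs_info =
       { NULL, &hypo_usrregs_info, &hypo_regsets_info };

   The NULL regset_bitmap means every register is transferred as part of
   some regset; hypo_regmap gives each register's PTRACE_PEEKUSER offset
   for the usrregs path.  */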

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           CORE_ADDR *trampoline,
                                           ULONGEST *trampoline_size,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end,
                                           char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;
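
/* For illustration only: a hypothetical port with no fast tracepoint,
   bytecode compilation, or range stepping support might fill in only
   the hooks it implements, along these lines (the "hypo_" function is
   invented; whether a given hook may be left NULL is decided by the
   generic linux-low code, so treat this purely as a sketch):

     struct linux_target_ops the_low_target =
       {
         NULL, NULL, NULL, NULL, NULL, NULL,
         hypo_supports_hardware_single_step,
         NULL,
         NULL,
       };

   Here only the seventh member, supports_hardware_single_step, is
   provided; the remaining members are left NULL.  */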

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
                       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
               int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
                   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
                       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf,
                    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
                     unsigned const char *writebuf,
                     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len) override;
#endif

  bool supports_tracepoints () override;

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                        CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
                            unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
                                     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
                   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
                        buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
                    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
                            size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
                      int *handle_len) override;
#endif

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected LWP if we should,
     or NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
                 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP stopped at a
     breakpoint, to make progress, we need to remove the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming; otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
                             siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or can't single step.  */
  int single_step (lwp_info* lwp);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object into/from the siginfo in the
     layout of the inferior's architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
                      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     error.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
                            target_waitstatus *ourstatus);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
                                            char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
                                           const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};

extern linux_process_target *the_linux_target;
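
/* For illustration only: a minimal hypothetical port would derive from
   linux_process_target and provide at least the pure virtual methods
   declared above, along these lines (every "hypo_" name is invented for
   this sketch):

     class hypo_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override
       { return &hypo_regs_info; }

     protected:
       void low_arch_setup () override;
       bool low_cannot_fetch_register (int regno) override;
       bool low_cannot_store_register (int regno) override;
       bool low_breakpoint_at (CORE_ADDR pc) override;
     };

     static hypo_target the_hypo_target;
     linux_process_target *the_linux_target = &the_hypo_target;

   A real port additionally overrides whichever optional low_* hooks it
   needs (breakpoints and watchpoints, siginfo fixup, new-process and
   new-thread hooks, and so on).  */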

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)

/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', keyed by the "overall process ID",
   which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;  /* Inclusive */
  CORE_ADDR step_range_end;    /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
                    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
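
/* For illustration only: an architecture's low_arch_setup might consult
   AT_HWCAP to choose a target description, along these lines (HWCAP_HYPO
   is a made-up capability bit, and the wordsize of 8 assumes a 64-bit
   inferior):

     CORE_ADDR hwcap = linux_get_hwcap (8);
     if ((hwcap & HWCAP_HYPO) != 0)
       select the extended description;
     else
       select the base description;

   Both linux_get_hwcap and linux_get_hwcap2 return zero when the
   corresponding entry is absent from the inferior's auxiliary vector.  */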

#endif /* GDBSERVER_LINUX_LOW_H */