/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
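
/* Illustrative sketch (not part of this header): an architecture's
   regsets array pairs ptrace requests with fill/store callbacks and is
   terminated with NULL_REGSET, as required above.  The request
   constants, sizes, and callback names below are assumptions chosen
   for illustration only.  */
#if 0
static struct regset_info example_regsets[] = {
  /* Whole general-purpose register file via PTRACE_GETREGS/SETREGS.  */
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (struct user_regs_struct),
    GENERAL_REGS, example_fill_gregset, example_store_gregset },
  /* FP registers; OPTIONAL_REGS means an EIO here is not an error.  */
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (struct user_fpregs_struct),
    OPTIONAL_REGS, example_fill_fpregset, example_store_fpregset },
  NULL_REGSET
};
#endif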

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
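
/* Illustrative sketch (not part of this header): an architecture might
   tie the pieces above together like this.  The register count, the
   regmap contents, and all of the "example_" names are hypothetical.  */
#if 0
static int example_regmap[] = { 0, 4, 8, 12 };	/* One `struct user' offset
						   per GDB register.  */

static struct usrregs_info example_usrregs =
  {
    4,			/* num_regs */
    example_regmap,	/* regmap */
  };

static struct regs_info example_regs_info =
  {
    NULL,		/* regset_bitmap: no mixed handling needed.  */
    &example_usrregs,	/* usrregs */
#ifdef HAVE_LINUX_REGSETS
    &example_regsets_info,	/* A regsets_info wrapping an array such as
				   the one sketched after regset_info.  */
#endif
  };
#endif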

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  void (*delete_process) (struct arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  void (*new_thread) (struct lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  void (*delete_thread) (struct arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  void (*new_fork) (struct process_info *parent, struct process_info *child);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (char **, int count);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					   CORE_ADDR collector,
					   CORE_ADDR lockaddr,
					   ULONGEST orig_size,
					   CORE_ADDR *jump_entry,
					   CORE_ADDR *trampoline,
					   ULONGEST *trampoline_size,
					   unsigned char *jjump_pad_insn,
					   ULONGEST *jjump_pad_insn_size,
					   CORE_ADDR *adjusted_insn_addr,
					   CORE_ADDR *adjusted_insn_addr_end,
					   char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;
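
/* Illustrative sketch (not part of this header): each architecture
   backend defines the_low_target, leaving hooks it does not need as
   NULL.  The "example_" callback names below are hypothetical.  */
#if 0
struct linux_target_ops the_low_target =
  {
    example_siginfo_fixup,		/* siginfo_fixup */
    example_new_process,		/* new_process */
    example_delete_process,		/* delete_process */
    example_new_thread,			/* new_thread */
    example_delete_thread,		/* delete_thread */
    NULL,				/* new_fork */
    example_prepare_to_resume,		/* prepare_to_resume */
    NULL,				/* process_qsupported */
    NULL,				/* supports_tracepoints */
    NULL,				/* get_thread_area */
    NULL,				/* install_fast_tracepoint_jump_pad */
    NULL,				/* emit_ops */
    NULL,				/* get_min_fast_tracepoint_insn_len */
    NULL,				/* supports_range_stepping */
    NULL,				/* supports_hardware_single_step */
    NULL,				/* get_syscall_trapinfo */
    NULL,				/* get_ipa_tdesc_idx */
  };
#endif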

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
		       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
	       int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
		   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
		    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
		 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
		       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf,
		    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
		     unsigned const char *writebuf,
		     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len) override;
#endif

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
			    unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
				     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
		   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
			buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
		    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
			    size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
		      int *handle_len) override;
#endif

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected lwp if we are, or
     NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
		 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP stopped at a
     breakpoint, to make progress, we need to move the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming; otherwise resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
			     siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or can't single step.  */
  int single_step (lwp_info* lwp);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in CHILD the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
					    char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
					   const char *buf);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};

extern linux_process_target *the_linux_target;
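
/* Illustrative sketch (not part of this header): an architecture
   backend derives from linux_process_target and implements at least
   the pure virtual "low" methods.  The class name, register count,
   and the example_tdesc/example_regs_info objects referenced below
   are hypothetical.  */
#if 0
class example_arch_target : public linux_process_target
{
public:
  const regs_info *get_regs_info () override
  { return &example_regs_info; }

protected:
  void low_arch_setup () override
  { current_process ()->tdesc = example_tdesc; }

  bool low_cannot_fetch_register (int regno) override
  { return regno >= EXAMPLE_NUM_REGS; }

  bool low_cannot_store_register (int regno) override
  { return regno >= EXAMPLE_NUM_REGS; }

  bool low_breakpoint_at (CORE_ADDR pc) override
  { return false; /* No software breakpoint support in this sketch.  */ }
};

/* The single instance the rest of gdbserver would use.  */
static example_arch_target the_example_arch_target;
linux_process_target *the_linux_target = &the_example_arch_target;
#endif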

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
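
/* Illustrative sketch (not part of this header): code in linux-low.c
   moves between the two representations with these macros, e.g.:

     struct lwp_info *lwp = get_thread_lwp (current_thread);
     struct thread_info *thr = get_lwp_thread (lwp);   // back again

   assuming current_thread points at a live GNU/Linux thread.  */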

/* This struct is recorded in the target_data field of struct thread_info.

   On Linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signals whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;	/* Inclusive */
  CORE_ADDR step_range_end;	/* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
		    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
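
/* Illustrative sketch (not part of this header): an arch backend can
   use these helpers to probe hardware capabilities when picking a
   target description, e.g.:

     CORE_ADDR hwcap = linux_get_hwcap (is_64bit ? 8 : 4);
     if (hwcap & EXAMPLE_HWCAP_FEATURE_BIT)
       ...select the feature-enabled tdesc...

   where is_64bit and EXAMPLE_HWCAP_FEATURE_BIT are assumptions made for
   illustration; real backends test bits such as those defined in
   <asm/hwcap.h> for their architecture.  */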

#endif /* GDBSERVER_LINUX_LOW_H */