gdbserver/linux-low: turn 'prepare_to_resume' into a method
/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};

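/* Illustrative sketch (not part of the original header): roughly how an
   architecture backend defines its regsets array.  The ptrace requests,
   size and fill/store callbacks below are hypothetical placeholders;
   real backends (for example linux-x86-low.cc) supply their own.  Note
   the NULL_REGSET terminator required by the comment above.  */
#if 0
static void example_fill_gregset (struct regcache *regcache, void *buf);
static void example_store_gregset (struct regcache *regcache, const void *buf);

static struct regset_info example_regsets[] =
  {
    /* General registers, fetched/stored with PTRACE_GETREGS /
       PTRACE_SETREGS.  nt_type is 0, so no iovec is involved.  */
    { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
      GENERAL_REGS, example_fill_gregset, example_store_gregset },
    /* The array must end with NULL_REGSET.  */
    NULL_REGSET
  };
#endif
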
/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};

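/* Illustrative sketch (not part of the original header): how the three
   structures above are typically wired together by a backend.  The
   register count, regmap and the example_* names are hypothetical
   placeholders (example_regsets is the array sketched earlier).  */
#if 0
/* GDB register number -> offset in `struct user'.  */
static int example_regmap[] = { 0, 8, 16, 24 };

static struct usrregs_info example_usrregs =
  {
    4,               /* num_regs */
    example_regmap,  /* regmap */
  };

static struct regsets_info example_regsets_info =
  {
    example_regsets, /* regsets */
    0,               /* num_regsets, counted by initialize_regsets_info */
    NULL,            /* disabled_regsets */
  };

static struct regs_info example_regs_info =
  {
    NULL,            /* regset_bitmap: no per-register distinction */
    &example_usrregs,
#ifdef HAVE_LINUX_REGSETS
    &example_regsets_info,
#endif
  };
#endif
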
struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (char **, int count);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           CORE_ADDR *trampoline,
                                           ULONGEST *trampoline_size,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end,
                                           char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;

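/* Illustrative sketch (not part of the original header): the shape of the
   hook table a backend provides for the hooks that remain in
   linux_target_ops.  The NULL entries below stand in for a backend that
   implements none of the optional hooks; this is a placeholder, not a
   real definition.  */
#if 0
struct linux_target_ops the_low_target =
  {
    NULL, /* process_qsupported */
    NULL, /* supports_tracepoints */
    NULL, /* get_thread_area */
    NULL, /* install_fast_tracepoint_jump_pad */
    NULL, /* emit_ops */
    NULL, /* get_min_fast_tracepoint_insn_len */
    NULL, /* supports_range_stepping */
    NULL, /* supports_hardware_single_step */
    NULL, /* get_syscall_trapinfo */
    NULL, /* get_ipa_tdesc_idx */
  };
#endif
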
/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
                       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
               int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
                   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
                       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf,
                    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
                     unsigned const char *writebuf,
                     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len) override;
#endif

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                        CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
                            unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
                                     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
                   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
                        buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
                    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
                            size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
                      int *handle_len) override;
#endif

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected lwp if we are, or
     NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
                 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP is stopped at a
     breakpoint, to make progress we need to get the breakpoint out of
     the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* We resume in the caller because we want to be able to pass
     lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming; otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
                       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
                                     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap, check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
                             siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  This assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or can't single step.  */
  int single_step (lwp_info* lwp);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped is to check the register that is used to select
     which address to watch.  The problem is that, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object into/from the siginfo in the
     layout of the inferior's architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
                      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     error.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
                            target_waitstatus *ourstatus);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                                int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
                                            char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
                                           const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};

extern linux_process_target *the_linux_target;

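/* Illustrative sketch (not part of the original header): the rough shape
   of an architecture-specific target class.  Real backends (for example
   x86_target in linux-x86-low.cc) derive from linux_process_target and
   override the pure virtual and optional low_* methods declared above;
   every name prefixed with "example_" here is a hypothetical placeholder.  */
#if 0
class example_target : public linux_process_target
{
public:
  const regs_info *get_regs_info () override
  { return &example_regs_info; }

protected:
  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;
  bool low_cannot_store_register (int regno) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  /* Optional hook: update any per-thread state (e.g. debug registers)
     just before the LWP is resumed.  */
  void low_prepare_to_resume (lwp_info *lwp) override;
};

/* The single instance the generic linux-low code drives.  */
static example_target the_example_target;
#endif
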
#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)

/* This struct is recorded in the target_data field of struct thread_info.

   On Linux, ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signals whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.).  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;  /* Inclusive */
  CORE_ADDR step_range_end;    /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

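/* Illustrative sketch (not part of the original header): how the
   get_thread_lwp / get_lwp_thread accessors defined above are used to move
   between a thread_info and its lwp_info.  example_has_pending_status is a
   hypothetical helper.  */
#if 0
static bool
example_has_pending_status (struct thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* The backlink leads to the owning thread again.  */
  gdb_assert (get_lwp_thread (lwp) == thread);

  return lwp->stopped && lwp->status_pending_p;
}
#endif
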
int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
                    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are of
   length WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are of
   length WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);

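/* Illustrative sketch (not part of the original header): how a backend
   might test an AT_HWCAP capability bit, assuming a 64-bit inferior and a
   hypothetical HWCAP_EXAMPLE flag.  */
#if 0
static bool
example_has_hwcap_bit (void)
{
  CORE_ADDR hwcap = linux_get_hwcap (8 /* wordsize */);

  return (hwcap & HWCAP_EXAMPLE) != 0;
}
#endif
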
#endif /* GDBSERVER_LINUX_LOW_H */