gdbserver/linux-low: turn the 'decr_pc_after_break' field into a method
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func) (struct regcache *, void *);
38 typedef void (*regset_store_func) (struct regcache *, const void *);
39 enum regset_type {
40 GENERAL_REGS,
41 FP_REGS,
42 EXTENDED_REGS,
43 OPTIONAL_REGS, /* Do not error if the regset cannot be accessed. */
44 };
45
46 /* The arch's regsets array initializer must be terminated with a NULL
47 regset. */
48 #define NULL_REGSET \
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
50
51 struct regset_info
52 {
53 int get_request, set_request;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
56 int nt_type;
57 int size;
58 enum regset_type type;
59 regset_fill_func fill_function;
60 regset_store_func store_function;
61 };
62
63 /* Aggregation of all the supported regsets of a given
64 architecture/mode. */
65
66 struct regsets_info
67 {
68 /* The regsets array. */
69 struct regset_info *regsets;
70
71 /* The number of regsets in the REGSETS array. */
72 int num_regsets;
73
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets;
80 };
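/* Illustrative sketch, not part of this interface: a backend typically
   defines a NULL_REGSET-terminated regsets array and wraps it in a
   regsets_info.  The "example_*" names are hypothetical placeholders;
   PTRACE_GETREGS/PTRACE_SETREGS and elf_gregset_t come from the system
   headers, and the fill/store callbacks would be the backend's own
   conversion routines.  */

static void example_fill_gregset (struct regcache *regcache, void *buf);
static void example_store_gregset (struct regcache *regcache,
				   const void *buf);

static struct regset_info example_regsets[] =
{
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS, example_fill_gregset, example_store_gregset },
  NULL_REGSET
};

static struct regsets_info example_regsets_info =
{
  example_regsets,	/* regsets */
  0,			/* num_regsets, filled in by initialize_regsets_info */
  NULL,			/* disabled_regsets */
};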
81
82 #endif
83
84 /* Mapping between the general-purpose registers in `struct user'
85 format and GDB's register array layout. */
86
87 struct usrregs_info
88 {
89 /* The number of registers accessible. */
90 int num_regs;
91
92 /* The registers map. */
93 int *regmap;
94 };
95
96 /* All info needed to access an architecture/mode's registers. */
97
98 struct regs_info
99 {
100 /* Regset support bitmap: 1 for registers that are transferred as a part
101 of a regset, 0 for ones that need to be handled individually. This
102 can be NULL if all registers are transferred with regsets or regsets
103 are not supported. */
104 unsigned char *regset_bitmap;
105
106 /* Info used when accessing registers with PTRACE_PEEKUSER /
107 PTRACE_POKEUSER. This can be NULL if all registers are
108       transferred with regsets.  */
109 struct usrregs_info *usrregs;
110
111 #ifdef HAVE_LINUX_REGSETS
112 /* Info used when accessing registers with regsets. */
113 struct regsets_info *regsets_info;
114 #endif
115 };
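/* Sketch only: the usrregs / regsets halves above are typically wired
   into a statically defined regs_info, which the target's
   get_regs_info method returns.  The "example_*" names, register
   count and regmap are hypothetical.  */

static struct usrregs_info example_usrregs_info =
{
  0,		/* num_regs (hypothetical) */
  NULL,		/* regmap: hypothetical struct user offset table */
};

static struct regs_info example_regs_info =
{
  NULL,		/* regset_bitmap: all registers handled via regsets */
  &example_usrregs_info,
#ifdef HAVE_LINUX_REGSETS
  &example_regsets_info	/* from the sketch above */
#endif
};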
116
117 struct process_info_private
118 {
119 /* Arch-specific additions. */
120 struct arch_process_info *arch_private;
121
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db *thread_db;
125
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
127 CORE_ADDR r_debug;
128 };
129
130 struct lwp_info;
131
132 struct linux_target_ops
133 {
134 int (*breakpoint_at) (CORE_ADDR pc);
135
136 /* Breakpoint and watchpoint related functions. See target.h for
137 comments. */
138 int (*supports_z_point_type) (char z_type);
139 int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
140 int size, struct raw_breakpoint *bp);
141 int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
142 int size, struct raw_breakpoint *bp);
143
144 int (*stopped_by_watchpoint) (void);
145 CORE_ADDR (*stopped_data_address) (void);
146
147 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
148 for registers smaller than an xfer unit). */
149 void (*collect_ptrace_register) (struct regcache *regcache,
150 int regno, char *buf);
151 void (*supply_ptrace_register) (struct regcache *regcache,
152 int regno, const char *buf);
153
154 /* Hook to convert from target format to ptrace format and back.
155 Returns true if any conversion was done; false otherwise.
156 If DIRECTION is 1, then copy from INF to NATIVE.
157 If DIRECTION is 0, copy from NATIVE to INF. */
158 int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);
159
160 /* Hook to call when a new process is created or attached to.
161 If extra per-process architecture-specific data is needed,
162 allocate it here. */
163 struct arch_process_info * (*new_process) (void);
164
165 /* Hook to call when a process is being deleted. If extra per-process
166 architecture-specific data is needed, delete it here. */
167 void (*delete_process) (struct arch_process_info *info);
168
169 /* Hook to call when a new thread is detected.
170 If extra per-thread architecture-specific data is needed,
171 allocate it here. */
172 void (*new_thread) (struct lwp_info *);
173
174 /* Hook to call when a thread is being deleted. If extra per-thread
175 architecture-specific data is needed, delete it here. */
176 void (*delete_thread) (struct arch_lwp_info *);
177
178 /* Hook to call, if any, when a new fork is attached. */
179 void (*new_fork) (struct process_info *parent, struct process_info *child);
180
181 /* Hook to call prior to resuming a thread. */
182 void (*prepare_to_resume) (struct lwp_info *);
183
184 /* Hook to support target specific qSupported. */
185 void (*process_qsupported) (char **, int count);
186
187 /* Returns true if the low target supports tracepoints. */
188 int (*supports_tracepoints) (void);
189
190 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
191 success, -1 on failure. */
192 int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);
193
194 /* Install a fast tracepoint jump pad. See target.h for
195 comments. */
196 int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
197 CORE_ADDR collector,
198 CORE_ADDR lockaddr,
199 ULONGEST orig_size,
200 CORE_ADDR *jump_entry,
201 CORE_ADDR *trampoline,
202 ULONGEST *trampoline_size,
203 unsigned char *jjump_pad_insn,
204 ULONGEST *jjump_pad_insn_size,
205 CORE_ADDR *adjusted_insn_addr,
206 CORE_ADDR *adjusted_insn_addr_end,
207 char *err);
208
209 /* Return the bytecode operations vector for the current inferior.
210 Returns NULL if bytecode compilation is not supported. */
211 struct emit_ops *(*emit_ops) (void);
212
213 /* Return the minimum length of an instruction that can be safely overwritten
214 for use as a fast tracepoint. */
215 int (*get_min_fast_tracepoint_insn_len) (void);
216
217 /* Returns true if the low target supports range stepping. */
218 int (*supports_range_stepping) (void);
219
220 /* See target.h. */
221 int (*supports_hardware_single_step) (void);
222
223 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
224 inferior is stopped due to SYSCALL_SIGTRAP. */
225 void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);
226
227 /* See target.h. */
228 int (*get_ipa_tdesc_idx) (void);
229 };
230
231 extern struct linux_target_ops the_low_target;
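/* Sketch of one of the hooks above (hypothetical, not from the tree):
   a backend that supports software and hardware breakpoints but no
   watchpoints could implement supports_z_point_type like this.  The
   Z_PACKET_* constants are assumed to be the ones from mem-break.h.  */

static int
example_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
      return 1;
    default:
      return 0;
    }
}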
232
233 /* Target ops definitions for a Linux target. */
234
235 class linux_process_target : public process_stratum_target
236 {
237 public:
238
239 int create_inferior (const char *program,
240 const std::vector<char *> &program_args) override;
241
242 void post_create_inferior () override;
243
244 int attach (unsigned long pid) override;
245
246 int kill (process_info *proc) override;
247
248 int detach (process_info *proc) override;
249
250 void mourn (process_info *proc) override;
251
252 void join (int pid) override;
253
254 bool thread_alive (ptid_t pid) override;
255
256 void resume (thread_resume *resume_info, size_t n) override;
257
258 ptid_t wait (ptid_t ptid, target_waitstatus *status,
259 int options) override;
260
261 void fetch_registers (regcache *regcache, int regno) override;
262
263 void store_registers (regcache *regcache, int regno) override;
264
265 int prepare_to_access_memory () override;
266
267 void done_accessing_memory () override;
268
269 int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
270 int len) override;
271
272 int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
273 int len) override;
274
275 void look_up_symbols () override;
276
277 void request_interrupt () override;
278
279 bool supports_read_auxv () override;
280
281 int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
282 unsigned int len) override;
283
284 bool supports_z_point_type (char z_type) override;
285
286 int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
287 int size, raw_breakpoint *bp) override;
288
289 int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
290 int size, raw_breakpoint *bp) override;
291
292 bool stopped_by_sw_breakpoint () override;
293
294 bool supports_stopped_by_sw_breakpoint () override;
295
296 bool stopped_by_hw_breakpoint () override;
297
298 bool supports_stopped_by_hw_breakpoint () override;
299
300 bool supports_hardware_single_step () override;
301
302 bool stopped_by_watchpoint () override;
303
304 CORE_ADDR stopped_data_address () override;
305
306 bool supports_read_offsets () override;
307
308 int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;
309
310 bool supports_get_tls_address () override;
311
312 int get_tls_address (thread_info *thread, CORE_ADDR offset,
313 CORE_ADDR load_module, CORE_ADDR *address) override;
314
315 bool supports_qxfer_osdata () override;
316
317 int qxfer_osdata (const char *annex, unsigned char *readbuf,
318 unsigned const char *writebuf,
319 CORE_ADDR offset, int len) override;
320
321 bool supports_qxfer_siginfo () override;
322
323 int qxfer_siginfo (const char *annex, unsigned char *readbuf,
324 unsigned const char *writebuf,
325 CORE_ADDR offset, int len) override;
326
327 bool supports_non_stop () override;
328
329 bool async (bool enable) override;
330
331 int start_non_stop (bool enable) override;
332
333 bool supports_multi_process () override;
334
335 bool supports_fork_events () override;
336
337 bool supports_vfork_events () override;
338
339 bool supports_exec_events () override;
340
341 void handle_new_gdb_connection () override;
342
343 int handle_monitor_command (char *mon) override;
344
345 int core_of_thread (ptid_t ptid) override;
346
347 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
348 bool supports_read_loadmap () override;
349
350 int read_loadmap (const char *annex, CORE_ADDR offset,
351 unsigned char *myaddr, unsigned int len) override;
352 #endif
353
354 void process_qsupported (char **features, int count) override;
355
356 bool supports_tracepoints () override;
357
358 CORE_ADDR read_pc (regcache *regcache) override;
359
360 void write_pc (regcache *regcache, CORE_ADDR pc) override;
361
362 bool supports_thread_stopped () override;
363
364 bool thread_stopped (thread_info *thread) override;
365
366 void pause_all (bool freeze) override;
367
368 void unpause_all (bool unfreeze) override;
369
370 void stabilize_threads () override;
371
372 bool supports_fast_tracepoints () override;
373
374 int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
375 CORE_ADDR tpaddr,
376 CORE_ADDR collector,
377 CORE_ADDR lockaddr,
378 ULONGEST orig_size,
379 CORE_ADDR *jump_entry,
380 CORE_ADDR *trampoline,
381 ULONGEST *trampoline_size,
382 unsigned char *jjump_pad_insn,
383 ULONGEST *jjump_pad_insn_size,
384 CORE_ADDR *adjusted_insn_addr,
385 CORE_ADDR *adjusted_insn_addr_end,
386 char *err) override;
387
388 int get_min_fast_tracepoint_insn_len () override;
389
390 struct emit_ops *emit_ops () override;
391
392 bool supports_disable_randomization () override;
393
394 bool supports_qxfer_libraries_svr4 () override;
395
396 int qxfer_libraries_svr4 (const char *annex,
397 unsigned char *readbuf,
398 unsigned const char *writebuf,
399 CORE_ADDR offset, int len) override;
400
401 bool supports_agent () override;
402
403 #ifdef HAVE_LINUX_BTRACE
404 btrace_target_info *enable_btrace (ptid_t ptid,
405 const btrace_config *conf) override;
406
407 int disable_btrace (btrace_target_info *tinfo) override;
408
409 int read_btrace (btrace_target_info *tinfo, buffer *buf,
410 enum btrace_read_type type) override;
411
412 int read_btrace_conf (const btrace_target_info *tinfo,
413 buffer *buf) override;
414 #endif
415
416 bool supports_range_stepping () override;
417
418 bool supports_pid_to_exec_file () override;
419
420 char *pid_to_exec_file (int pid) override;
421
422 bool supports_multifs () override;
423
424 int multifs_open (int pid, const char *filename, int flags,
425 mode_t mode) override;
426
427 int multifs_unlink (int pid, const char *filename) override;
428
429 ssize_t multifs_readlink (int pid, const char *filename, char *buf,
430 size_t bufsiz) override;
431
432 const char *thread_name (ptid_t thread) override;
433
434 #if USE_THREAD_DB
435 bool thread_handle (ptid_t ptid, gdb_byte **handle,
436 int *handle_len) override;
437 #endif
438
439 bool supports_catch_syscall () override;
440
441 int get_ipa_tdesc_idx () override;
442
443 /* Return the information to access registers. This has public
444 visibility because proc-service uses it. */
445 virtual const regs_info *get_regs_info () = 0;
446
447 private:
448
449 /* Handle a GNU/Linux extended wait response. If we see a clone,
450 fork, or vfork event, we need to add the new LWP to our list
451 (and return 0 so as not to report the trap to higher layers).
452 If we see an exec event, we will modify ORIG_EVENT_LWP to point
453 to a new LWP representing the new program. */
454 int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);
455
456 /* Do low-level handling of the event, and check if we should go on
457      and pass it to caller code.  Return the affected lwp if so, or
458 NULL otherwise. */
459 lwp_info *filter_event (int lwpid, int wstat);
460
461 /* Wait for an event from child(ren) WAIT_PTID, and return any that
462 match FILTER_PTID (leaving others pending). The PTIDs can be:
463 minus_one_ptid, to specify any child; a pid PTID, specifying all
464 lwps of a thread group; or a PTID representing a single lwp. Store
465 the stop status through the status pointer WSTAT. OPTIONS is
466 passed to the waitpid call. Return 0 if no event was found and
467 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
468      were found.  Return the PID of the stopped child otherwise.  */
469 int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
470 int *wstatp, int options);
471
472 /* Wait for an event from child(ren) PTID. PTIDs can be:
473 minus_one_ptid, to specify any child; a pid PTID, specifying all
474 lwps of a thread group; or a PTID representing a single lwp. Store
475 the stop status through the status pointer WSTAT. OPTIONS is
476 passed to the waitpid call. Return 0 if no event was found and
477 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
478      were found.  Return the PID of the stopped child otherwise.  */
479 int wait_for_event (ptid_t ptid, int *wstatp, int options);
480
481 /* Wait for all children to stop for the SIGSTOPs we just queued. */
482 void wait_for_sigstop ();
483
484   /* Wait for the process and return its status.  */
485 ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
486 int target_options);
487
488 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
489 If SUSPEND, then also increase the suspend count of every LWP,
490 except EXCEPT. */
491 void stop_all_lwps (int suspend, lwp_info *except);
492
493   /* Stopped LWPs that the client wanted to be running and that don't
494      have pending statuses are set to run again, except for EXCEPT, if
495      not NULL.  This undoes a stop_all_lwps call.  */
496 void unstop_all_lwps (int unsuspend, lwp_info *except);
497
498   /* Start a step-over operation on LWP.  When LWP is stopped at a
499      breakpoint, to make progress, we need to get the breakpoint out
500      of the way.  If we let other threads run while we do that, they may
501 pass by the breakpoint location and miss hitting it. To avoid
502 that, a step-over momentarily stops all threads while LWP is
503 single-stepped by either hardware or software while the breakpoint
504 is temporarily uninserted from the inferior. When the single-step
505 finishes, we reinsert the breakpoint, and let all threads that are
506 supposed to be running, run again. */
507 void start_step_over (lwp_info *lwp);
508
509 /* If there's a step over in progress, wait until all threads stop
510 (that is, until the stepping thread finishes its step), and
511 unsuspend all lwps. The stepping thread ends with its status
512 pending, which is processed later when we get back to processing
513 events. */
514 void complete_ongoing_step_over ();
515
516 /* When we finish a step-over, set threads running again. If there's
517 another thread that may need a step-over, now's the time to start
518 it. Eventually, we'll move all threads past their breakpoints. */
519 void proceed_all_lwps ();
520
521   /* The reason we resume in the caller is that we want to be able
522      to pass lwp->status_pending as WSTAT, and we need to clear
523      status_pending_p before resuming; otherwise, resume_one_lwp
524 refuses to resume. */
525 bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);
526
527 /* Move THREAD out of the jump pad. */
528 void move_out_of_jump_pad (thread_info *thread);
529
530 /* Call low_arch_setup on THREAD. */
531 void arch_setup_thread (thread_info *thread);
532
533 #ifdef HAVE_LINUX_USRREGS
534 /* Fetch one register. */
535 void fetch_register (const usrregs_info *usrregs, regcache *regcache,
536 int regno);
537
538 /* Store one register. */
539 void store_register (const usrregs_info *usrregs, regcache *regcache,
540 int regno);
541 #endif
542
543 /* Fetch all registers, or just one, from the child process.
544 If REGNO is -1, do this for all registers, skipping any that are
545 assumed to have been retrieved by regsets_fetch_inferior_registers,
546 unless ALL is non-zero.
547 Otherwise, REGNO specifies which register (so we can save time). */
548 void usr_fetch_inferior_registers (const regs_info *regs_info,
549 regcache *regcache, int regno, int all);
550
551 /* Store our register values back into the inferior.
552 If REGNO is -1, do this for all registers, skipping any that are
553 assumed to have been saved by regsets_store_inferior_registers,
554 unless ALL is non-zero.
555 Otherwise, REGNO specifies which register (so we can save time). */
556 void usr_store_inferior_registers (const regs_info *regs_info,
557 regcache *regcache, int regno, int all);
558
559 /* Return the PC as read from the regcache of LWP, without any
560 adjustment. */
561 CORE_ADDR get_pc (lwp_info *lwp);
562
563 /* Called when the LWP stopped for a signal/trap. If it stopped for a
564 trap check what caused it (breakpoint, watchpoint, trace, etc.),
565 and save the result in the LWP's stop_reason field. If it stopped
566 for a breakpoint, decrement the PC if necessary on the lwp's
567 architecture. Returns true if we now have the LWP's stop PC. */
568 bool save_stop_reason (lwp_info *lwp);
569
570 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
571 SIGNAL is nonzero, give it that signal. */
572 void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
573 siginfo_t *info);
574
575 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
576 disappears while we try to resume it. */
577 void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);
578
579 /* This function is called once per thread. We check the thread's
580 last resume request, which will tell us whether to resume, step, or
581 leave the thread stopped. Any signal the client requested to be
582 delivered has already been enqueued at this point.
583
584 If any thread that GDB wants running is stopped at an internal
585 breakpoint that needs stepping over, we start a step-over operation
586 on that particular thread, and leave all others stopped. */
587 void proceed_one_lwp (thread_info *thread, lwp_info *except);
588
589 /* This function is called once per thread. We check the thread's
590 resume request, which will tell us whether to resume, step, or
591 leave the thread stopped; and what signal, if any, it should be
592 sent.
593
594 For threads which we aren't explicitly told otherwise, we preserve
595 the stepping flag; this is used for stepping over gdbserver-placed
596 breakpoints.
597
598 If pending_flags was set in any thread, we queue any needed
599 signals, since we won't actually resume. We already have a pending
600 event to report, so we don't need to preserve any step requests;
601 they should be re-issued if necessary. */
602 void resume_one_thread (thread_info *thread, bool leave_all_stopped);
603
604 /* Return true if this lwp has an interesting status pending. */
605 bool status_pending_p_callback (thread_info *thread, ptid_t ptid);
606
607 /* Resume LWPs that are currently stopped without any pending status
608 to report, but are resumed from the core's perspective. */
609 void resume_stopped_resumed_lwps (thread_info *thread);
610
611   /* Unsuspend THREAD, unless it is EXCEPT, and proceed.  */
612 void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);
613
614 /* Return true if this lwp still has an interesting status pending.
615 If not (e.g., it had stopped for a breakpoint that is gone), return
616 false. */
617 bool thread_still_has_status_pending (thread_info *thread);
618
619 /* Return true if this lwp is to-be-resumed and has an interesting
620 status pending. */
621 bool resume_status_pending (thread_info *thread);
622
623 /* Return true if this lwp that GDB wants running is stopped at an
624 internal breakpoint that we need to step over. It assumes that
625 any required STOP_PC adjustment has already been propagated to
626 the inferior's regcache. */
627 bool thread_needs_step_over (thread_info *thread);
628
629 /* Single step via hardware or software single step.
630 Return 1 if hardware single stepping, 0 if software single stepping
631 or can't single step. */
632 int single_step (lwp_info* lwp);
633
634 /* Install breakpoints for software single stepping. */
635 void install_software_single_step_breakpoints (lwp_info *lwp);
636
637 protected:
638   /* The architecture-specific "low" methods are listed below (see the sketch after this class).  */
639
640 /* Architecture-specific setup for the current thread. */
641 virtual void low_arch_setup () = 0;
642
643 /* Return false if we can fetch/store the register, true if we cannot
644 fetch/store the register. */
645 virtual bool low_cannot_fetch_register (int regno) = 0;
646
647 virtual bool low_cannot_store_register (int regno) = 0;
648
649 /* Hook to fetch a register in some non-standard way. Used for
650 example by backends that have read-only registers with hardcoded
651 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
652      REGNO was supplied, false if not, and we should fall back to the
653 standard ptrace methods. */
654 virtual bool low_fetch_register (regcache *regcache, int regno);
655
656 /* Return true if breakpoints are supported. Such targets must
657 implement the GET_PC and SET_PC methods. */
658 virtual bool low_supports_breakpoints ();
659
660 virtual CORE_ADDR low_get_pc (regcache *regcache);
661
662 virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);
663
664 /* Find the next possible PCs after the current instruction executes.
665 Targets that override this method should also override
666 'supports_software_single_step' to return true. */
667 virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);
668
669 /* How many bytes the PC should be decremented after a break. */
670 virtual int low_decr_pc_after_break ();
671 };
672
673 extern linux_process_target *the_linux_target;
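/* Sketch (hypothetical names, not from the tree): an architecture
   backend in linux-<arch>-low.cc derives from linux_process_target,
   overrides the low_* methods it needs, and points the_linux_target at
   a singleton instance.  A target whose breakpoint instruction leaves
   the PC one byte past the break would also override
   low_decr_pc_after_break, here using the linux_get_pc_64bit /
   linux_set_pc_64bit helpers declared later in this file.  */

class example_arch_target : public linux_process_target
{
public:
  const regs_info *get_regs_info () override
  { return &example_regs_info; }	/* from the earlier sketch */

protected:
  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override
  { return true; }

  CORE_ADDR low_get_pc (regcache *regcache) override
  { return linux_get_pc_64bit (regcache); }

  void low_set_pc (regcache *regcache, CORE_ADDR pc) override
  { linux_set_pc_64bit (regcache, pc); }

  /* Hypothetical: the breakpoint instruction is one byte long.  */
  int low_decr_pc_after_break () override
  { return 1; }
};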
674
675 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
676 #define get_lwp_thread(lwp) ((lwp)->thread)
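/* Usage sketch (hypothetical helper): the two macros above convert
   between the generic thread_info and the Linux-specific lwp_info.
   current_thread is declared in gdbthread.h.  */

static inline struct lwp_info *
example_current_lwp ()
{
  return get_thread_lwp (current_thread);
}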
677
678 /* This struct is recorded in the target_data field of struct thread_info.
679
680 On linux ``all_threads'' is keyed by the LWP ID, which we use as the
681 GDB protocol representation of the thread ID. Threads also have
682 a "process ID" (poorly named) which is (presently) the same as the
683 LWP ID.
684
685    There is also ``all_processes'', keyed by the "overall process ID",
686 which GNU/Linux calls tgid, "thread group ID". */
687
688 struct lwp_info
689 {
690 /* Backlink to the parent object. */
691 struct thread_info *thread;
692
693 /* If this flag is set, the next SIGSTOP will be ignored (the
694 process will be immediately resumed). This means that either we
695 sent the SIGSTOP to it ourselves and got some other pending event
696 (so the SIGSTOP is still pending), or that we stopped the
697 inferior implicitly via PTRACE_ATTACH and have not waited for it
698 yet. */
699 int stop_expected;
700
701 /* When this is true, we shall not try to resume this thread, even
702 if last_resume_kind isn't resume_stop. */
703 int suspended;
704
705 /* If this flag is set, the lwp is known to be stopped right now (stop
706 event already received in a wait()). */
707 int stopped;
708
709   /* Indicates whether we are in a SYSCALL_ENTRY or
710 in a SYSCALL_RETURN event.
711 Values:
712 - TARGET_WAITKIND_SYSCALL_ENTRY
713 - TARGET_WAITKIND_SYSCALL_RETURN */
714 enum target_waitkind syscall_state;
715
716 /* When stopped is set, the last wait status recorded for this lwp. */
717 int last_status;
718
719 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
720 this LWP's last event, to pass to GDB without any further
721 processing. This is used to store extended ptrace event
722 information or exit status until it can be reported to GDB. */
723 struct target_waitstatus waitstatus;
724
725 /* A pointer to the fork child/parent relative. Valid only while
726 the parent fork event is not reported to higher layers. Used to
727 avoid wildcard vCont actions resuming a fork child before GDB is
728 notified about the parent's fork event. */
729 struct lwp_info *fork_relative;
730
731 /* When stopped is set, this is where the lwp last stopped, with
732 decr_pc_after_break already accounted for. If the LWP is
733 running, this is the address at which the lwp was resumed. */
734 CORE_ADDR stop_pc;
735
736 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
737 been reported. */
738 int status_pending_p;
739 int status_pending;
740
741 /* The reason the LWP last stopped, if we need to track it
742 (breakpoint, watchpoint, etc.) */
743 enum target_stop_reason stop_reason;
744
745 /* On architectures where it is possible to know the data address of
746 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
747      contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
748 is true. */
749 CORE_ADDR stopped_data_address;
750
751 /* If this is non-zero, it is a breakpoint to be reinserted at our next
752 stop (SIGTRAP stops only). */
753 CORE_ADDR bp_reinsert;
754
755 /* If this flag is set, the last continue operation at the ptrace
756 level on this process was a single-step. */
757 int stepping;
758
759 /* Range to single step within. This is a copy of the step range
760 passed along the last resume request. See 'struct
761 thread_resume'. */
762 CORE_ADDR step_range_start; /* Inclusive */
763 CORE_ADDR step_range_end; /* Exclusive */
764
765 /* If this flag is set, we need to set the event request flags the
766 next time we see this LWP stop. */
767 int must_set_ptrace_flags;
768
769 /* If this is non-zero, it points to a chain of signals which need to
770 be delivered to this process. */
771 struct pending_signals *pending_signals;
772
773 /* A link used when resuming. It is initialized from the resume request,
774 and then processed and cleared in linux_resume_one_lwp. */
775 struct thread_resume *resume;
776
777   /* Information about this lwp's fast tracepoint collection status (is it
778 currently stopped in the jump pad, and if so, before or at/after the
779 relocated instruction). Normally, we won't care about this, but we will
780 if a signal arrives to this lwp while it is collecting. */
781 fast_tpoint_collect_result collecting_fast_tracepoint;
782
783 /* If this is non-zero, it points to a chain of signals which need
784 to be reported to GDB. These were deferred because the thread
785 was doing a fast tracepoint collect when they arrived. */
786 struct pending_signals *pending_signals_to_report;
787
788 /* When collecting_fast_tracepoint is first found to be 1, we insert
789      an exit-jump-pad-quickly breakpoint.  This is it.  */
790 struct breakpoint *exit_jump_pad_bkpt;
791
792 #ifdef USE_THREAD_DB
793 int thread_known;
794 /* The thread handle, used for e.g. TLS access. Only valid if
795 THREAD_KNOWN is set. */
796 td_thrhandle_t th;
797
798 /* The pthread_t handle. */
799 thread_t thread_handle;
800 #endif
801
802 /* Arch-specific additions. */
803 struct arch_lwp_info *arch_private;
804 };
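/* Sketch: with the inclusive/exclusive convention of step_range_start
   and step_range_end above, "PC is still within the step range" is a
   half-open interval test (hypothetical helper, not part of this
   header):  */

static inline bool
example_in_step_range (const struct lwp_info *lwp, CORE_ADDR pc)
{
  return pc >= lwp->step_range_start && pc < lwp->step_range_end;
}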
805
806 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
807
808 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
809 errno). */
810 int linux_attach_lwp (ptid_t ptid);
811
812 struct lwp_info *find_lwp_pid (ptid_t ptid);
813 /* For linux_stop_lwp see nat/linux-nat.h. */
814
815 #ifdef HAVE_LINUX_REGSETS
816 void initialize_regsets_info (struct regsets_info *regsets_info);
817 #endif
818
819 void initialize_low_arch (void);
820
821 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
822 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
823
824 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
825 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
826
827 /* From thread-db.c */
828 int thread_db_init (void);
829 void thread_db_detach (struct process_info *);
830 void thread_db_mourn (struct process_info *);
831 int thread_db_handle_monitor_command (char *);
832 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
833 CORE_ADDR load_module, CORE_ADDR *address);
834 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
835
836 /* Called from linux-low.c when a clone event is detected. Upon entry,
837 both the clone and the parent should be stopped. This function does
838    whatever is required to have the clone under thread_db's control.  */
839
840 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
841
842 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
843
844 extern int have_ptrace_getregset;
845
846 /* Search for the value with type MATCH in the auxv vector with
847 entries of length WORDSIZE bytes. If found, store the value in
848 *VALP and return 1. If not found or if there is an error, return
849 0. */
850
851 int linux_get_auxv (int wordsize, CORE_ADDR match,
852 CORE_ADDR *valp);
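/* For instance (sketch): fetching the program header table address of
   a 64-bit inferior would look roughly like this.  AT_PHDR comes from
   <elf.h>; the helper name is hypothetical.  */

static inline bool
example_get_phdr_addr (CORE_ADDR *phdr)
{
  return linux_get_auxv (8, AT_PHDR, phdr) != 0;
}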
853
854 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
855 WORDSIZE. If no entry was found, return zero. */
856
857 CORE_ADDR linux_get_hwcap (int wordsize);
858
859 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
860 WORDSIZE. If no entry was found, return zero. */
861
862 CORE_ADDR linux_get_hwcap2 (int wordsize);
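/* Usage sketch (hypothetical feature bit): backends usually mask the
   result of linux_get_hwcap against a flag from <asm/hwcap.h> when
   deciding which target description to expose.  EXAMPLE_HWCAP_FLAG is
   a placeholder, not a real constant.  */

static inline bool
example_cpu_has_feature (int wordsize)
{
  const CORE_ADDR EXAMPLE_HWCAP_FLAG = 1ULL << 4;

  return (linux_get_hwcap (wordsize) & EXAMPLE_HWCAP_FLAG) != 0;
}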
863
864 #endif /* GDBSERVER_LINUX_LOW_H */