gdbserver/linux-low: turn 'supports_z_point_type' into a method
gdbserver/linux-low.h (deliverable/binutils-gdb.git)
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func) (struct regcache *, void *);
38 typedef void (*regset_store_func) (struct regcache *, const void *);
39 enum regset_type {
40 GENERAL_REGS,
41 FP_REGS,
42 EXTENDED_REGS,
43 OPTIONAL_REGS, /* Do not error if the regset cannot be accessed. */
44 };
45
46 /* The arch's regsets array initializer must be terminated with a NULL
47 regset. */
48 #define NULL_REGSET \
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
50
51 struct regset_info
52 {
53 int get_request, set_request;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
56 int nt_type;
57 int size;
58 enum regset_type type;
59 regset_fill_func fill_function;
60 regset_store_func store_function;
61 };
62
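For illustration, a sketch of how an architecture backend might declare its regsets table, terminated with NULL_REGSET as required above. The ptrace requests, the elf_gregset_t size (from <sys/procfs.h>), and the example_* fill/store helpers are hypothetical placeholders modeled on an x86-style port; a real backend supplies its own, and the helper definitions are omitted here.

    /* Hypothetical helpers converting between the regcache layout and the
       native regset buffer; definitions omitted.  */
    static void example_fill_gregset (struct regcache *regcache, void *buf);
    static void example_store_gregset (struct regcache *regcache, const void *buf);

    static struct regset_info example_regsets[] =
      {
        { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
          GENERAL_REGS, example_fill_gregset, example_store_gregset },
        NULL_REGSET
      };
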
63 /* Aggregation of all the supported regsets of a given
64 architecture/mode. */
65
66 struct regsets_info
67 {
68 /* The regsets array. */
69 struct regset_info *regsets;
70
71 /* The number of regsets in the REGSETS array. */
72 int num_regsets;
73
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets;
80 };
81
82 #endif
83
84 /* Mapping between the general-purpose registers in `struct user'
85 format and GDB's register array layout. */
86
87 struct usrregs_info
88 {
89 /* The number of registers accessible. */
90 int num_regs;
91
92 /* The registers map. */
93 int *regmap;
94 };
95
96 /* All info needed to access an architecture/mode's registers. */
97
98 struct regs_info
99 {
100 /* Regset support bitmap: 1 for registers that are transferred as a part
101 of a regset, 0 for ones that need to be handled individually. This
102 can be NULL if all registers are transferred with regsets or regsets
103 are not supported. */
104 unsigned char *regset_bitmap;
105
106 /* Info used when accessing registers with PTRACE_PEEKUSER /
107 PTRACE_POKEUSER. This can be NULL if all registers are
108 transferred with regsets. */
109 struct usrregs_info *usrregs;
110
111 #ifdef HAVE_LINUX_REGSETS
112 /* Info used when accessing registers with regsets. */
113 struct regsets_info *regsets_info;
114 #endif
115 };
116
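For illustration, a hypothetical backend could tie the structures above together roughly as follows; its get_regs_info method (declared later in linux_process_target) would then return &example_regs_info. The register count, the regmap offsets, and the example_regsets array from the previous sketch are made-up values.

    /* Offsets into `struct user', one entry per GDB register number.  */
    static int example_regmap[] = { 0, 8, 16 };

    static struct usrregs_info example_usrregs =
      {
        3,              /* num_regs */
        example_regmap, /* regmap */
      };

    #ifdef HAVE_LINUX_REGSETS
    static struct regsets_info example_regsets_info =
      {
        example_regsets, /* regsets (NULL_REGSET terminated, see above) */
        0,               /* num_regsets, filled in by initialize_regsets_info */
        NULL,            /* disabled_regsets, allocated lazily */
      };
    #endif

    static struct regs_info example_regs_info =
      {
        NULL,            /* regset_bitmap (NULL is allowed, see above) */
        &example_usrregs,
    #ifdef HAVE_LINUX_REGSETS
        &example_regsets_info,
    #endif
      };
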
117 struct process_info_private
118 {
119 /* Arch-specific additions. */
120 struct arch_process_info *arch_private;
121
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db *thread_db;
125
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
127 CORE_ADDR r_debug;
128 };
129
130 struct lwp_info;
131
132 struct linux_target_ops
133 {
134 /* Breakpoint and watchpoint related functions. See target.h for
135 comments. */
136 int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
137 int size, struct raw_breakpoint *bp);
138 int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
139 int size, struct raw_breakpoint *bp);
140
141 int (*stopped_by_watchpoint) (void);
142 CORE_ADDR (*stopped_data_address) (void);
143
144 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
145 for registers smaller than an xfer unit). */
146 void (*collect_ptrace_register) (struct regcache *regcache,
147 int regno, char *buf);
148 void (*supply_ptrace_register) (struct regcache *regcache,
149 int regno, const char *buf);
150
151 /* Hook to convert from target format to ptrace format and back.
152 Returns true if any conversion was done; false otherwise.
153 If DIRECTION is 1, then copy from INF to NATIVE.
154 If DIRECTION is 0, copy from NATIVE to INF. */
155 int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);
156
157 /* Hook to call when a new process is created or attached to.
158 If extra per-process architecture-specific data is needed,
159 allocate it here. */
160 struct arch_process_info * (*new_process) (void);
161
162 /* Hook to call when a process is being deleted. If extra per-process
163 architecture-specific data is needed, delete it here. */
164 void (*delete_process) (struct arch_process_info *info);
165
166 /* Hook to call when a new thread is detected.
167 If extra per-thread architecture-specific data is needed,
168 allocate it here. */
169 void (*new_thread) (struct lwp_info *);
170
171 /* Hook to call when a thread is being deleted. If extra per-thread
172 architecture-specific data is needed, delete it here. */
173 void (*delete_thread) (struct arch_lwp_info *);
174
175 /* Hook to call, if any, when a new fork is attached. */
176 void (*new_fork) (struct process_info *parent, struct process_info *child);
177
178 /* Hook to call prior to resuming a thread. */
179 void (*prepare_to_resume) (struct lwp_info *);
180
181 /* Hook to support target specific qSupported. */
182 void (*process_qsupported) (char **, int count);
183
184 /* Returns true if the low target supports tracepoints. */
185 int (*supports_tracepoints) (void);
186
187 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
188 success, -1 on failure. */
189 int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);
190
191 /* Install a fast tracepoint jump pad. See target.h for
192 comments. */
193 int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
194 CORE_ADDR collector,
195 CORE_ADDR lockaddr,
196 ULONGEST orig_size,
197 CORE_ADDR *jump_entry,
198 CORE_ADDR *trampoline,
199 ULONGEST *trampoline_size,
200 unsigned char *jjump_pad_insn,
201 ULONGEST *jjump_pad_insn_size,
202 CORE_ADDR *adjusted_insn_addr,
203 CORE_ADDR *adjusted_insn_addr_end,
204 char *err);
205
206 /* Return the bytecode operations vector for the current inferior.
207 Returns NULL if bytecode compilation is not supported. */
208 struct emit_ops *(*emit_ops) (void);
209
210 /* Return the minimum length of an instruction that can be safely overwritten
211 for use as a fast tracepoint. */
212 int (*get_min_fast_tracepoint_insn_len) (void);
213
214 /* Returns true if the low target supports range stepping. */
215 int (*supports_range_stepping) (void);
216
217 /* See target.h. */
218 int (*supports_hardware_single_step) (void);
219
220 /* Fill *SYSNO with the number of the syscall that trapped. Only to
221 be called when the inferior is stopped due to SYSCALL_SIGTRAP. */
222 void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);
223
224 /* See target.h. */
225 int (*get_ipa_tdesc_idx) (void);
226 };
227
228 extern struct linux_target_ops the_low_target;
229
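For illustration, each port defines the single the_low_target object declared above in its linux-*-low file. Hooks a port does not implement are typically left NULL and treated as "not supported" by the generic code. The example_* functions below are hypothetical (definitions omitted), and only the first few slots are shown; the remaining members are value-initialized to NULL.

    static int example_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                                     int size, struct raw_breakpoint *bp);
    static int example_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                                     int size, struct raw_breakpoint *bp);
    static int example_stopped_by_watchpoint (void);
    static CORE_ADDR example_stopped_data_address (void);

    struct linux_target_ops the_low_target =
      {
        example_insert_point,           /* insert_point */
        example_remove_point,           /* remove_point */
        example_stopped_by_watchpoint,  /* stopped_by_watchpoint */
        example_stopped_data_address,   /* stopped_data_address */
        /* The remaining hooks default to NULL.  */
      };
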
230 /* Target ops definitions for a Linux target. */
231
232 class linux_process_target : public process_stratum_target
233 {
234 public:
235
236 int create_inferior (const char *program,
237 const std::vector<char *> &program_args) override;
238
239 void post_create_inferior () override;
240
241 int attach (unsigned long pid) override;
242
243 int kill (process_info *proc) override;
244
245 int detach (process_info *proc) override;
246
247 void mourn (process_info *proc) override;
248
249 void join (int pid) override;
250
251 bool thread_alive (ptid_t pid) override;
252
253 void resume (thread_resume *resume_info, size_t n) override;
254
255 ptid_t wait (ptid_t ptid, target_waitstatus *status,
256 int options) override;
257
258 void fetch_registers (regcache *regcache, int regno) override;
259
260 void store_registers (regcache *regcache, int regno) override;
261
262 int prepare_to_access_memory () override;
263
264 void done_accessing_memory () override;
265
266 int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
267 int len) override;
268
269 int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
270 int len) override;
271
272 void look_up_symbols () override;
273
274 void request_interrupt () override;
275
276 bool supports_read_auxv () override;
277
278 int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
279 unsigned int len) override;
280
281 int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
282 int size, raw_breakpoint *bp) override;
283
284 int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
285 int size, raw_breakpoint *bp) override;
286
287 bool stopped_by_sw_breakpoint () override;
288
289 bool supports_stopped_by_sw_breakpoint () override;
290
291 bool stopped_by_hw_breakpoint () override;
292
293 bool supports_stopped_by_hw_breakpoint () override;
294
295 bool supports_hardware_single_step () override;
296
297 bool stopped_by_watchpoint () override;
298
299 CORE_ADDR stopped_data_address () override;
300
301 bool supports_read_offsets () override;
302
303 int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;
304
305 bool supports_get_tls_address () override;
306
307 int get_tls_address (thread_info *thread, CORE_ADDR offset,
308 CORE_ADDR load_module, CORE_ADDR *address) override;
309
310 bool supports_qxfer_osdata () override;
311
312 int qxfer_osdata (const char *annex, unsigned char *readbuf,
313 unsigned const char *writebuf,
314 CORE_ADDR offset, int len) override;
315
316 bool supports_qxfer_siginfo () override;
317
318 int qxfer_siginfo (const char *annex, unsigned char *readbuf,
319 unsigned const char *writebuf,
320 CORE_ADDR offset, int len) override;
321
322 bool supports_non_stop () override;
323
324 bool async (bool enable) override;
325
326 int start_non_stop (bool enable) override;
327
328 bool supports_multi_process () override;
329
330 bool supports_fork_events () override;
331
332 bool supports_vfork_events () override;
333
334 bool supports_exec_events () override;
335
336 void handle_new_gdb_connection () override;
337
338 int handle_monitor_command (char *mon) override;
339
340 int core_of_thread (ptid_t ptid) override;
341
342 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
343 bool supports_read_loadmap () override;
344
345 int read_loadmap (const char *annex, CORE_ADDR offset,
346 unsigned char *myaddr, unsigned int len) override;
347 #endif
348
349 void process_qsupported (char **features, int count) override;
350
351 bool supports_tracepoints () override;
352
353 CORE_ADDR read_pc (regcache *regcache) override;
354
355 void write_pc (regcache *regcache, CORE_ADDR pc) override;
356
357 bool supports_thread_stopped () override;
358
359 bool thread_stopped (thread_info *thread) override;
360
361 void pause_all (bool freeze) override;
362
363 void unpause_all (bool unfreeze) override;
364
365 void stabilize_threads () override;
366
367 bool supports_fast_tracepoints () override;
368
369 int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
370 CORE_ADDR tpaddr,
371 CORE_ADDR collector,
372 CORE_ADDR lockaddr,
373 ULONGEST orig_size,
374 CORE_ADDR *jump_entry,
375 CORE_ADDR *trampoline,
376 ULONGEST *trampoline_size,
377 unsigned char *jjump_pad_insn,
378 ULONGEST *jjump_pad_insn_size,
379 CORE_ADDR *adjusted_insn_addr,
380 CORE_ADDR *adjusted_insn_addr_end,
381 char *err) override;
382
383 int get_min_fast_tracepoint_insn_len () override;
384
385 struct emit_ops *emit_ops () override;
386
387 bool supports_disable_randomization () override;
388
389 bool supports_qxfer_libraries_svr4 () override;
390
391 int qxfer_libraries_svr4 (const char *annex,
392 unsigned char *readbuf,
393 unsigned const char *writebuf,
394 CORE_ADDR offset, int len) override;
395
396 bool supports_agent () override;
397
398 #ifdef HAVE_LINUX_BTRACE
399 btrace_target_info *enable_btrace (ptid_t ptid,
400 const btrace_config *conf) override;
401
402 int disable_btrace (btrace_target_info *tinfo) override;
403
404 int read_btrace (btrace_target_info *tinfo, buffer *buf,
405 enum btrace_read_type type) override;
406
407 int read_btrace_conf (const btrace_target_info *tinfo,
408 buffer *buf) override;
409 #endif
410
411 bool supports_range_stepping () override;
412
413 bool supports_pid_to_exec_file () override;
414
415 char *pid_to_exec_file (int pid) override;
416
417 bool supports_multifs () override;
418
419 int multifs_open (int pid, const char *filename, int flags,
420 mode_t mode) override;
421
422 int multifs_unlink (int pid, const char *filename) override;
423
424 ssize_t multifs_readlink (int pid, const char *filename, char *buf,
425 size_t bufsiz) override;
426
427 const char *thread_name (ptid_t thread) override;
428
429 #if USE_THREAD_DB
430 bool thread_handle (ptid_t ptid, gdb_byte **handle,
431 int *handle_len) override;
432 #endif
433
434 bool supports_catch_syscall () override;
435
436 int get_ipa_tdesc_idx () override;
437
438 /* Return the information to access registers. This has public
439 visibility because proc-service uses it. */
440 virtual const regs_info *get_regs_info () = 0;
441
442 private:
443
444 /* Handle a GNU/Linux extended wait response. If we see a clone,
445 fork, or vfork event, we need to add the new LWP to our list
446 (and return 0 so as not to report the trap to higher layers).
447 If we see an exec event, we will modify ORIG_EVENT_LWP to point
448 to a new LWP representing the new program. */
449 int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);
450
451 /* Do low-level handling of the event, and check if we should go on
452 and pass it to caller code. Return the affected lwp if so, or
453 NULL otherwise. */
454 lwp_info *filter_event (int lwpid, int wstat);
455
456 /* Wait for an event from child(ren) WAIT_PTID, and return any that
457 match FILTER_PTID (leaving others pending). The PTIDs can be:
458 minus_one_ptid, to specify any child; a pid PTID, specifying all
459 lwps of a thread group; or a PTID representing a single lwp. Store
460 the stop status through the status pointer WSTAT. OPTIONS is
461 passed to the waitpid call. Return 0 if no event was found and
462 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
463 were found. Return the PID of the stopped child otherwise. */
464 int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
465 int *wstatp, int options);
466
467 /* Wait for an event from child(ren) PTID. PTIDs can be:
468 minus_one_ptid, to specify any child; a pid PTID, specifying all
469 lwps of a thread group; or a PTID representing a single lwp. Store
470 the stop status through the status pointer WSTAT. OPTIONS is
471 passed to the waitpid call. Return 0 if no event was found and
472 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
473 were found. Return the PID of the stopped child otherwise. */
474 int wait_for_event (ptid_t ptid, int *wstatp, int options);
475
476 /* Wait for all children to stop for the SIGSTOPs we just queued. */
477 void wait_for_sigstop ();
478
479 /* Wait for process, returns status. */
480 ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
481 int target_options);
482
483 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
484 If SUSPEND, then also increase the suspend count of every LWP,
485 except EXCEPT. */
486 void stop_all_lwps (int suspend, lwp_info *except);
487
488 /* Stopped LWPs that the client wanted to be running, that don't have
489 pending statuses, are set to run again, except for EXCEPT, if not
490 NULL. This undoes a stop_all_lwps call. */
491 void unstop_all_lwps (int unsuspend, lwp_info *except);
492
493 /* Start a step-over operation on LWP. When LWP is stopped at a
494 breakpoint, to make progress we need to move the breakpoint out
495 of the way. If we let other threads run while we do that, they may
496 pass by the breakpoint location and miss hitting it. To avoid
497 that, a step-over momentarily stops all threads while LWP is
498 single-stepped by either hardware or software while the breakpoint
499 is temporarily uninserted from the inferior. When the single-step
500 finishes, we reinsert the breakpoint, and let all threads that are
501 supposed to be running, run again. */
502 void start_step_over (lwp_info *lwp);
503
504 /* If there's a step over in progress, wait until all threads stop
505 (that is, until the stepping thread finishes its step), and
506 unsuspend all lwps. The stepping thread ends with its status
507 pending, which is processed later when we get back to processing
508 events. */
509 void complete_ongoing_step_over ();
510
511 /* When we finish a step-over, set threads running again. If there's
512 another thread that may need a step-over, now's the time to start
513 it. Eventually, we'll move all threads past their breakpoints. */
514 void proceed_all_lwps ();
515
516 /* The reason we resume in the caller is that we want to be able
517 to pass lwp->status_pending as WSTAT, and we need to clear
518 status_pending_p before resuming; otherwise resume_one_lwp
519 refuses to resume. */
520 bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);
521
522 /* Move THREAD out of the jump pad. */
523 void move_out_of_jump_pad (thread_info *thread);
524
525 /* Call low_arch_setup on THREAD. */
526 void arch_setup_thread (thread_info *thread);
527
528 #ifdef HAVE_LINUX_USRREGS
529 /* Fetch one register. */
530 void fetch_register (const usrregs_info *usrregs, regcache *regcache,
531 int regno);
532
533 /* Store one register. */
534 void store_register (const usrregs_info *usrregs, regcache *regcache,
535 int regno);
536 #endif
537
538 /* Fetch all registers, or just one, from the child process.
539 If REGNO is -1, do this for all registers, skipping any that are
540 assumed to have been retrieved by regsets_fetch_inferior_registers,
541 unless ALL is non-zero.
542 Otherwise, REGNO specifies which register (so we can save time). */
543 void usr_fetch_inferior_registers (const regs_info *regs_info,
544 regcache *regcache, int regno, int all);
545
546 /* Store our register values back into the inferior.
547 If REGNO is -1, do this for all registers, skipping any that are
548 assumed to have been saved by regsets_store_inferior_registers,
549 unless ALL is non-zero.
550 Otherwise, REGNO specifies which register (so we can save time). */
551 void usr_store_inferior_registers (const regs_info *regs_info,
552 regcache *regcache, int regno, int all);
553
554 /* Return the PC as read from the regcache of LWP, without any
555 adjustment. */
556 CORE_ADDR get_pc (lwp_info *lwp);
557
558 /* Called when the LWP stopped for a signal/trap. If it stopped for a
559 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
560 and save the result in the LWP's stop_reason field. If it stopped
561 for a breakpoint, decrement the PC if necessary on the lwp's
562 architecture. Returns true if we now have the LWP's stop PC. */
563 bool save_stop_reason (lwp_info *lwp);
564
565 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
566 SIGNAL is nonzero, give it that signal. */
567 void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
568 siginfo_t *info);
569
570 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
571 disappears while we try to resume it. */
572 void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);
573
574 /* This function is called once per thread. We check the thread's
575 last resume request, which will tell us whether to resume, step, or
576 leave the thread stopped. Any signal the client requested to be
577 delivered has already been enqueued at this point.
578
579 If any thread that GDB wants running is stopped at an internal
580 breakpoint that needs stepping over, we start a step-over operation
581 on that particular thread, and leave all others stopped. */
582 void proceed_one_lwp (thread_info *thread, lwp_info *except);
583
584 /* This function is called once per thread. We check the thread's
585 resume request, which will tell us whether to resume, step, or
586 leave the thread stopped; and what signal, if any, it should be
587 sent.
588
589 For threads which we aren't explicitly told otherwise, we preserve
590 the stepping flag; this is used for stepping over gdbserver-placed
591 breakpoints.
592
593 If pending_flags was set in any thread, we queue any needed
594 signals, since we won't actually resume. We already have a pending
595 event to report, so we don't need to preserve any step requests;
596 they should be re-issued if necessary. */
597 void resume_one_thread (thread_info *thread, bool leave_all_stopped);
598
599 /* Return true if this lwp has an interesting status pending. */
600 bool status_pending_p_callback (thread_info *thread, ptid_t ptid);
601
602 /* Resume LWPs that are currently stopped without any pending status
603 to report, but are resumed from the core's perspective. */
604 void resume_stopped_resumed_lwps (thread_info *thread);
605
606 /* Unsuspend THREAD, except EXCEPT, and proceed. */
607 void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);
608
609 /* Return true if this lwp still has an interesting status pending.
610 If not (e.g., it had stopped for a breakpoint that is gone), return
611 false. */
612 bool thread_still_has_status_pending (thread_info *thread);
613
614 /* Return true if this lwp is to-be-resumed and has an interesting
615 status pending. */
616 bool resume_status_pending (thread_info *thread);
617
618 /* Return true if this lwp that GDB wants running is stopped at an
619 internal breakpoint that we need to step over. It assumes that
620 any required STOP_PC adjustment has already been propagated to
621 the inferior's regcache. */
622 bool thread_needs_step_over (thread_info *thread);
623
624 /* Single step via hardware or software single step.
625 Return 1 if hardware single stepping, 0 if software single stepping
626 or can't single step. */
627 int single_step (lwp_info* lwp);
628
629 /* Install breakpoints for software single stepping. */
630 void install_software_single_step_breakpoints (lwp_info *lwp);
631
632 protected:
633 /* The architecture-specific "low" methods are listed below. */
634
635 /* Architecture-specific setup for the current thread. */
636 virtual void low_arch_setup () = 0;
637
638 /* Return false if we can fetch/store the register, true if we cannot
639 fetch/store the register. */
640 virtual bool low_cannot_fetch_register (int regno) = 0;
641
642 virtual bool low_cannot_store_register (int regno) = 0;
643
644 /* Hook to fetch a register in some non-standard way. Used for
645 example by backends that have read-only registers with hardcoded
646 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
647 REGNO was supplied, false if not, and we should fallback to the
648 standard ptrace methods. */
649 virtual bool low_fetch_register (regcache *regcache, int regno);
650
651 /* Return true if breakpoints are supported. Such targets must
652 implement the low_get_pc and low_set_pc methods. */
653 virtual bool low_supports_breakpoints ();
654
655 virtual CORE_ADDR low_get_pc (regcache *regcache);
656
657 virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);
658
659 /* Find the next possible PCs after the current instruction executes.
660 Targets that override this method should also override
661 'supports_software_single_step' to return true. */
662 virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);
663
664 /* Return true if there is a breakpoint at PC. */
665 virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;
666
667 /* How many bytes the PC should be decremented after a break. */
668 virtual int low_decr_pc_after_break ();
669 };
670
671 extern linux_process_target *the_linux_target;
672
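For illustration, a minimal sketch of a port-specific class deriving from linux_process_target. It overrides only the pure-virtual low methods and provides the single instance that the_linux_target (declared above) points at. The class name is hypothetical, example_regs_info refers to the earlier sketch, and both the arch setup and the breakpoint check are deliberately stubbed out.

    class example_target : public linux_process_target
    {
    public:
      const regs_info *get_regs_info () override
      { return &example_regs_info; }

    protected:
      void low_arch_setup () override
      {
        /* A real port selects and installs the target description for
           the current thread here.  */
      }

      bool low_cannot_fetch_register (int regno) override
      { return false; }

      bool low_cannot_store_register (int regno) override
      { return false; }

      bool low_breakpoint_at (CORE_ADDR pc) override
      {
        /* A real port reads the bytes at PC and compares them against
           the architecture's breakpoint instruction.  */
        return false;
      }
    };

    /* The single target instance the rest of gdbserver talks to.  */
    static example_target the_example_target;
    linux_process_target *the_linux_target = &the_example_target;
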
673 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
674 #define get_lwp_thread(lwp) ((lwp)->thread)
675
676 /* This struct is recorded in the target_data field of struct thread_info.
677
678 On Linux ``all_threads'' is keyed by the LWP ID, which we use as the
679 GDB protocol representation of the thread ID. Threads also have
680 a "process ID" (poorly named) which is (presently) the same as the
681 LWP ID.
682
683 There is also ``all_processes'', which is keyed by the "overall process ID",
684 which GNU/Linux calls tgid, "thread group ID". */
685
686 struct lwp_info
687 {
688 /* Backlink to the parent object. */
689 struct thread_info *thread;
690
691 /* If this flag is set, the next SIGSTOP will be ignored (the
692 process will be immediately resumed). This means that either we
693 sent the SIGSTOP to it ourselves and got some other pending event
694 (so the SIGSTOP is still pending), or that we stopped the
695 inferior implicitly via PTRACE_ATTACH and have not waited for it
696 yet. */
697 int stop_expected;
698
699 /* When this is true, we shall not try to resume this thread, even
700 if last_resume_kind isn't resume_stop. */
701 int suspended;
702
703 /* If this flag is set, the lwp is known to be stopped right now (stop
704 event already received in a wait()). */
705 int stopped;
706
707 /* Signal whether we are in a SYSCALL_ENTRY or
708 in a SYSCALL_RETURN event.
709 Values:
710 - TARGET_WAITKIND_SYSCALL_ENTRY
711 - TARGET_WAITKIND_SYSCALL_RETURN */
712 enum target_waitkind syscall_state;
713
714 /* When stopped is set, the last wait status recorded for this lwp. */
715 int last_status;
716
717 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
718 this LWP's last event, to pass to GDB without any further
719 processing. This is used to store extended ptrace event
720 information or exit status until it can be reported to GDB. */
721 struct target_waitstatus waitstatus;
722
723 /* A pointer to the fork child/parent relative. Valid only while
724 the parent fork event is not reported to higher layers. Used to
725 avoid wildcard vCont actions resuming a fork child before GDB is
726 notified about the parent's fork event. */
727 struct lwp_info *fork_relative;
728
729 /* When stopped is set, this is where the lwp last stopped, with
730 decr_pc_after_break already accounted for. If the LWP is
731 running, this is the address at which the lwp was resumed. */
732 CORE_ADDR stop_pc;
733
734 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
735 been reported. */
736 int status_pending_p;
737 int status_pending;
738
739 /* The reason the LWP last stopped, if we need to track it
740 (breakpoint, watchpoint, etc.) */
741 enum target_stop_reason stop_reason;
742
743 /* On architectures where it is possible to know the data address of
744 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
745 contains that data address. Only valid if STOPPED_BY_WATCHPOINT
746 is true. */
747 CORE_ADDR stopped_data_address;
748
749 /* If this is non-zero, it is a breakpoint to be reinserted at our next
750 stop (SIGTRAP stops only). */
751 CORE_ADDR bp_reinsert;
752
753 /* If this flag is set, the last continue operation at the ptrace
754 level on this process was a single-step. */
755 int stepping;
756
757 /* Range to single step within. This is a copy of the step range
758 passed along the last resume request. See 'struct
759 thread_resume'. */
760 CORE_ADDR step_range_start; /* Inclusive */
761 CORE_ADDR step_range_end; /* Exclusive */
762
763 /* If this flag is set, we need to set the event request flags the
764 next time we see this LWP stop. */
765 int must_set_ptrace_flags;
766
767 /* If this is non-zero, it points to a chain of signals which need to
768 be delivered to this process. */
769 struct pending_signals *pending_signals;
770
771 /* A link used when resuming. It is initialized from the resume request,
772 and then processed and cleared in linux_resume_one_lwp. */
773 struct thread_resume *resume;
774
775 /* Information about this lwp's fast tracepoint collection status (is it
776 currently stopped in the jump pad, and if so, before or at/after the
777 relocated instruction). Normally, we won't care about this, but we will
778 if a signal arrives to this lwp while it is collecting. */
779 fast_tpoint_collect_result collecting_fast_tracepoint;
780
781 /* If this is non-zero, it points to a chain of signals which need
782 to be reported to GDB. These were deferred because the thread
783 was doing a fast tracepoint collect when they arrived. */
784 struct pending_signals *pending_signals_to_report;
785
786 /* When collecting_fast_tracepoint is first found to be 1, we insert
787 an exit-jump-pad-quickly breakpoint. This is it. */
788 struct breakpoint *exit_jump_pad_bkpt;
789
790 #ifdef USE_THREAD_DB
791 int thread_known;
792 /* The thread handle, used for e.g. TLS access. Only valid if
793 THREAD_KNOWN is set. */
794 td_thrhandle_t th;
795
796 /* The pthread_t handle. */
797 thread_t thread_handle;
798 #endif
799
800 /* Arch-specific additions. */
801 struct arch_lwp_info *arch_private;
802 };
803
804 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
805
806 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
807 errno). */
808 int linux_attach_lwp (ptid_t ptid);
809
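For illustration, the errno-style return convention documented above might be consumed roughly like this; the helper name and the warning text are hypothetical.

    #include <string.h>		/* For strerror.  */

    static void
    example_attach_one_lwp (int pid, int lwpid)
    {
      int err = linux_attach_lwp (ptid_t (pid, lwpid));

      if (err != 0)
        warning ("could not attach to LWP %d: %s", lwpid, strerror (err));
    }
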
810 struct lwp_info *find_lwp_pid (ptid_t ptid);
811 /* For linux_stop_lwp see nat/linux-nat.h. */
812
813 #ifdef HAVE_LINUX_REGSETS
814 void initialize_regsets_info (struct regsets_info *regsets_info);
815 #endif
816
817 void initialize_low_arch (void);
818
819 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
820 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
821
822 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
823 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
824
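For illustration, on a port whose program counter is a single 64-bit register, the low_get_pc/low_set_pc methods declared earlier can usually just delegate to these helpers. This assumes the hypothetical example_target class from the earlier sketch also declares these three overrides; low_supports_breakpoints must return true for the PC methods to be used.

    bool
    example_target::low_supports_breakpoints ()
    {
      return true;
    }

    CORE_ADDR
    example_target::low_get_pc (regcache *regcache)
    {
      return linux_get_pc_64bit (regcache);
    }

    void
    example_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
    {
      linux_set_pc_64bit (regcache, pc);
    }
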
825 /* From thread-db.c */
826 int thread_db_init (void);
827 void thread_db_detach (struct process_info *);
828 void thread_db_mourn (struct process_info *);
829 int thread_db_handle_monitor_command (char *);
830 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
831 CORE_ADDR load_module, CORE_ADDR *address);
832 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
833
834 /* Called from linux-low.c when a clone event is detected. Upon entry,
835 both the clone and the parent should be stopped. This function does
836 whatever is required to have the clone under thread_db's control. */
837
838 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
839
840 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
841
842 extern int have_ptrace_getregset;
843
844 /* Search for the value with type MATCH in the auxv vector with
845 entries of length WORDSIZE bytes. If found, store the value in
846 *VALP and return 1. If not found or if there is an error, return
847 0. */
848
849 int linux_get_auxv (int wordsize, CORE_ADDR match,
850 CORE_ADDR *valp);
851
852 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
853 WORDSIZE. If no entry was found, return zero. */
854
855 CORE_ADDR linux_get_hwcap (int wordsize);
856
857 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
858 WORDSIZE. If no entry was found, return zero. */
859
860 CORE_ADDR linux_get_hwcap2 (int wordsize);
861
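For illustration, a port's arch-setup code might consult these auxv helpers roughly as follows. AT_PHDR comes from <elf.h>; HWCAP_EXAMPLE_FEATURE is a made-up capability bit standing in for a real HWCAP_* constant on the architecture in question.

    #include <elf.h>		/* For AT_PHDR.  */

    #define HWCAP_EXAMPLE_FEATURE (1UL << 0)	/* Hypothetical HWCAP bit.  */

    static void
    example_probe_inferior (int wordsize)
    {
      CORE_ADDR phdr;

      if (linux_get_auxv (wordsize, AT_PHDR, &phdr))
        {
          /* PHDR now holds the address of the inferior's program header
             table.  */
        }

      if ((linux_get_hwcap (wordsize) & HWCAP_EXAMPLE_FEATURE) != 0)
        {
          /* The optional hardware feature is present; pick the matching
             target description / register set here.  */
        }
    }
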
862 #endif /* GDBSERVER_LINUX_LOW_H */