gdbserver/linux-low: turn 'get_thread_area' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-low.h
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
#ifdef HAVE_LINUX_REGSETS
/* Copy register values out of REGCACHE into a raw ptrace regset
   buffer, ready for PTRACE_SETREGS/PTRACE_SETREGSET.  */
typedef void (*regset_fill_func) (struct regcache *, void *);
/* Copy register values from a raw ptrace regset buffer (as returned
   by PTRACE_GETREGS/PTRACE_GETREGSET) into REGCACHE.  */
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,		/* General-purpose registers.  */
  FP_REGS,		/* Floating-point registers.  */
  EXTENDED_REGS,	/* Extended/vector state (e.g. SSE, AVX).  */
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

/* Describes one ptrace-accessible register set of the target.  */

struct regset_info
{
  /* The ptrace request numbers used to read and write this regset
     (e.g. PTRACE_GETREGS / PTRACE_SETREGS).  */
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  /* Size in bytes of the raw regset buffer.  */
  int size;
  enum regset_type type;
  /* Converters between the regcache and the raw regset layout.  */
  regset_fill_func fill_function;
  regset_store_func store_function;
};

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif
83
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  Used for the
   PTRACE_PEEKUSER/PTRACE_POKEUSER one-register-at-a-time access
   path.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  Indexed by GDB register number; each element
     is the corresponding byte offset in `struct user'.  */
  int *regmap;
};
95
/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
116
/* Linux-specific per-process data, hung off the generic
   process_info.  */

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};
129
struct lwp_info;

/* Remaining C-style callback table for arch-specific hooks that have
   not yet been converted into virtual methods of
   linux_process_target (see the low_* methods below).  Each
   architecture provides one instance, the_low_target.  */

struct linux_target_ops
{
  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					   CORE_ADDR collector,
					   CORE_ADDR lockaddr,
					   ULONGEST orig_size,
					   CORE_ADDR *jump_entry,
					   CORE_ADDR *trampoline,
					   ULONGEST *trampoline_size,
					   unsigned char *jjump_pad_insn,
					   ULONGEST *jjump_pad_insn_size,
					   CORE_ADDR *adjusted_insn_addr,
					   CORE_ADDR *adjusted_insn_addr_end,
					   char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP.  */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h.  */
  int (*get_ipa_tdesc_idx) (void);
};

/* The arch-specific callback table for the current build's
   architecture; defined in the per-arch linux-*-low.c file.  */
extern struct linux_target_ops the_low_target;
172
/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  /* process_stratum_target overrides.  These implement the generic
     target interface on top of ptrace/waitpid; see target.h for the
     contract of each method.  */

  int create_inferior (const char *program,
		       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
	       int options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
		   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
		    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
		 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
		       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf,
		    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
		     unsigned const char *writebuf,
		     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len) override;
#endif

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
			    unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (ptid_t ptid,
				     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
		   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
			buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
		    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
			    size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
		      int *handle_len) override;
#endif

  bool supports_catch_syscall () override;

  int get_ipa_tdesc_idx () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Internal helpers shared by the method implementations in
     linux-low.cc.  */

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if we should go on
     and pass it to caller code.  Return the affected lwp if we are, or
     NULL otherwise.  */
  lwp_info *filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     was found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     was found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
		 int target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP stopped at a
     breakpoint, to make progress, we need to remove the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller, is because we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming, otherwise, resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
			     siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or can't single step.  */
  int single_step (lwp_info* lwp);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object, into/from the siginfo in the
     layout of the inferiors' architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
		      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     error.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
			    target_waitstatus *ourstatus);

  /* Returns true if THREAD is stopped in a jump pad, and we can't
     move it out, because we need to report the stop event to GDB.  For
     example, if the user puts a breakpoint in the jump pad, it's
     because she wants to debug it.  */
  bool stuck_in_jump_pad (thread_info *thread);

  /* Convenience wrapper.  Returns information about LWP's fast tracepoint
     collection status.  */
  fast_tpoint_collect_result linux_fast_tracepoint_collecting
    (lwp_info *lwp, fast_tpoint_collect_status *status);

protected:
  /* The architecture-specific "low" methods are listed below.  Each
     linux-*-low.c file derives from this class and implements (or
     overrides) the subset relevant to its architecture.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fallback to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
					    char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
					   const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  virtual int low_get_thread_area (int lwpid, CORE_ADDR *addrp);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};
727
/* The concrete (per-architecture) Linux target instance in use.  */
extern linux_process_target *the_linux_target;

/* Accessors between the generic thread_info and the Linux-specific
   lwp_info it carries in its target_data field.  */
#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
732
/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'' is keyed by the "overall process ID",
   which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;	/* Inclusive */
  CORE_ADDR step_range_end;	/* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     a exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;

  /* The pthread_t handle.  */
  thread_t thread_handle;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};
860
/* Return non-zero if PID's executable is a 64-bit ELF file, storing
   its ELF machine number in *MACHINE.  */
int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

/* Find the lwp_info for the LWP whose id matches PTID, or NULL if
   there is none.  */
struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

/* Helpers to read/write the PC on targets whose PC register is a
   plain 32-bit or 64-bit value.  */
void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

/* Non-zero if the kernel supports PTRACE_GETREGSET.  */
extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
		    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
918
919 #endif /* GDBSERVER_LINUX_LOW_H */
This page took 0.062498 seconds and 4 git commands to generate.