0fdd8ceb9457facc0ec58a549928d9d6c1af26ed
[deliverable/binutils-gdb.git] / gdbserver / linux-low.h
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func) (struct regcache *, void *);
38 typedef void (*regset_store_func) (struct regcache *, const void *);
39 enum regset_type {
40 GENERAL_REGS,
41 FP_REGS,
42 EXTENDED_REGS,
43 OPTIONAL_REGS, /* Do not error if the regset cannot be accessed. */
44 };
45
46 /* The arch's regsets array initializer must be terminated with a NULL
47 regset. */
48 #define NULL_REGSET \
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
50
51 struct regset_info
52 {
53 int get_request, set_request;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
56 int nt_type;
57 int size;
58 enum regset_type type;
59 regset_fill_func fill_function;
60 regset_store_func store_function;
61 };
62
63 /* Aggregation of all the supported regsets of a given
64 architecture/mode. */
65
66 struct regsets_info
67 {
68 /* The regsets array. */
69 struct regset_info *regsets;
70
71 /* The number of regsets in the REGSETS array. */
72 int num_regsets;
73
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets;
80 };
81
82 #endif
83
84 /* Mapping between the general-purpose registers in `struct user'
85 format and GDB's register array layout. */
86
87 struct usrregs_info
88 {
89 /* The number of registers accessible. */
90 int num_regs;
91
92 /* The registers map. */
93 int *regmap;
94 };
95
96 /* All info needed to access an architecture/mode's registers. */
97
98 struct regs_info
99 {
100 /* Regset support bitmap: 1 for registers that are transferred as a part
101 of a regset, 0 for ones that need to be handled individually. This
102 can be NULL if all registers are transferred with regsets or regsets
103 are not supported. */
104 unsigned char *regset_bitmap;
105
106 /* Info used when accessing registers with PTRACE_PEEKUSER /
107 PTRACE_POKEUSER. This can be NULL if all registers are
108 transferred with regsets .*/
109 struct usrregs_info *usrregs;
110
111 #ifdef HAVE_LINUX_REGSETS
112 /* Info used when accessing registers with regsets. */
113 struct regsets_info *regsets_info;
114 #endif
115 };
116
117 struct process_info_private
118 {
119 /* Arch-specific additions. */
120 struct arch_process_info *arch_private;
121
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db *thread_db;
125
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
127 CORE_ADDR r_debug;
128 };
129
130 struct lwp_info;
131
132 struct linux_target_ops
133 {
134 int (*stopped_by_watchpoint) (void);
135 CORE_ADDR (*stopped_data_address) (void);
136
137 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
138 for registers smaller than an xfer unit). */
139 void (*collect_ptrace_register) (struct regcache *regcache,
140 int regno, char *buf);
141 void (*supply_ptrace_register) (struct regcache *regcache,
142 int regno, const char *buf);
143
144 /* Hook to convert from target format to ptrace format and back.
145 Returns true if any conversion was done; false otherwise.
146 If DIRECTION is 1, then copy from INF to NATIVE.
147 If DIRECTION is 0, copy from NATIVE to INF. */
148 int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);
149
150 /* Hook to call when a new process is created or attached to.
151 If extra per-process architecture-specific data is needed,
152 allocate it here. */
153 struct arch_process_info * (*new_process) (void);
154
155 /* Hook to call when a process is being deleted. If extra per-process
156 architecture-specific data is needed, delete it here. */
157 void (*delete_process) (struct arch_process_info *info);
158
159 /* Hook to call when a new thread is detected.
160 If extra per-thread architecture-specific data is needed,
161 allocate it here. */
162 void (*new_thread) (struct lwp_info *);
163
164 /* Hook to call when a thread is being deleted. If extra per-thread
165 architecture-specific data is needed, delete it here. */
166 void (*delete_thread) (struct arch_lwp_info *);
167
168 /* Hook to call, if any, when a new fork is attached. */
169 void (*new_fork) (struct process_info *parent, struct process_info *child);
170
171 /* Hook to call prior to resuming a thread. */
172 void (*prepare_to_resume) (struct lwp_info *);
173
174 /* Hook to support target specific qSupported. */
175 void (*process_qsupported) (char **, int count);
176
177 /* Returns true if the low target supports tracepoints. */
178 int (*supports_tracepoints) (void);
179
180 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
181 success, -1 on failure. */
182 int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);
183
184 /* Install a fast tracepoint jump pad. See target.h for
185 comments. */
186 int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
187 CORE_ADDR collector,
188 CORE_ADDR lockaddr,
189 ULONGEST orig_size,
190 CORE_ADDR *jump_entry,
191 CORE_ADDR *trampoline,
192 ULONGEST *trampoline_size,
193 unsigned char *jjump_pad_insn,
194 ULONGEST *jjump_pad_insn_size,
195 CORE_ADDR *adjusted_insn_addr,
196 CORE_ADDR *adjusted_insn_addr_end,
197 char *err);
198
199 /* Return the bytecode operations vector for the current inferior.
200 Returns NULL if bytecode compilation is not supported. */
201 struct emit_ops *(*emit_ops) (void);
202
203 /* Return the minimum length of an instruction that can be safely overwritten
204 for use as a fast tracepoint. */
205 int (*get_min_fast_tracepoint_insn_len) (void);
206
207 /* Returns true if the low target supports range stepping. */
208 int (*supports_range_stepping) (void);
209
210 /* See target.h. */
211 int (*supports_hardware_single_step) (void);
212
213 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
214 inferior is stopped due to SYSCALL_SIGTRAP. */
215 void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);
216
217 /* See target.h. */
218 int (*get_ipa_tdesc_idx) (void);
219 };
220
221 extern struct linux_target_ops the_low_target;
222
223 /* Target ops definitions for a Linux target. */
224
225 class linux_process_target : public process_stratum_target
226 {
227 public:
228
229 int create_inferior (const char *program,
230 const std::vector<char *> &program_args) override;
231
232 void post_create_inferior () override;
233
234 int attach (unsigned long pid) override;
235
236 int kill (process_info *proc) override;
237
238 int detach (process_info *proc) override;
239
240 void mourn (process_info *proc) override;
241
242 void join (int pid) override;
243
244 bool thread_alive (ptid_t pid) override;
245
246 void resume (thread_resume *resume_info, size_t n) override;
247
248 ptid_t wait (ptid_t ptid, target_waitstatus *status,
249 int options) override;
250
251 void fetch_registers (regcache *regcache, int regno) override;
252
253 void store_registers (regcache *regcache, int regno) override;
254
255 int prepare_to_access_memory () override;
256
257 void done_accessing_memory () override;
258
259 int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
260 int len) override;
261
262 int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
263 int len) override;
264
265 void look_up_symbols () override;
266
267 void request_interrupt () override;
268
269 bool supports_read_auxv () override;
270
271 int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
272 unsigned int len) override;
273
274 int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
275 int size, raw_breakpoint *bp) override;
276
277 int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
278 int size, raw_breakpoint *bp) override;
279
280 bool stopped_by_sw_breakpoint () override;
281
282 bool supports_stopped_by_sw_breakpoint () override;
283
284 bool stopped_by_hw_breakpoint () override;
285
286 bool supports_stopped_by_hw_breakpoint () override;
287
288 bool supports_hardware_single_step () override;
289
290 bool stopped_by_watchpoint () override;
291
292 CORE_ADDR stopped_data_address () override;
293
294 bool supports_read_offsets () override;
295
296 int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;
297
298 bool supports_get_tls_address () override;
299
300 int get_tls_address (thread_info *thread, CORE_ADDR offset,
301 CORE_ADDR load_module, CORE_ADDR *address) override;
302
303 bool supports_qxfer_osdata () override;
304
305 int qxfer_osdata (const char *annex, unsigned char *readbuf,
306 unsigned const char *writebuf,
307 CORE_ADDR offset, int len) override;
308
309 bool supports_qxfer_siginfo () override;
310
311 int qxfer_siginfo (const char *annex, unsigned char *readbuf,
312 unsigned const char *writebuf,
313 CORE_ADDR offset, int len) override;
314
315 bool supports_non_stop () override;
316
317 bool async (bool enable) override;
318
319 int start_non_stop (bool enable) override;
320
321 bool supports_multi_process () override;
322
323 bool supports_fork_events () override;
324
325 bool supports_vfork_events () override;
326
327 bool supports_exec_events () override;
328
329 void handle_new_gdb_connection () override;
330
331 int handle_monitor_command (char *mon) override;
332
333 int core_of_thread (ptid_t ptid) override;
334
335 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
336 bool supports_read_loadmap () override;
337
338 int read_loadmap (const char *annex, CORE_ADDR offset,
339 unsigned char *myaddr, unsigned int len) override;
340 #endif
341
342 void process_qsupported (char **features, int count) override;
343
344 bool supports_tracepoints () override;
345
346 CORE_ADDR read_pc (regcache *regcache) override;
347
348 void write_pc (regcache *regcache, CORE_ADDR pc) override;
349
350 bool supports_thread_stopped () override;
351
352 bool thread_stopped (thread_info *thread) override;
353
354 void pause_all (bool freeze) override;
355
356 void unpause_all (bool unfreeze) override;
357
358 void stabilize_threads () override;
359
360 bool supports_fast_tracepoints () override;
361
362 int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
363 CORE_ADDR tpaddr,
364 CORE_ADDR collector,
365 CORE_ADDR lockaddr,
366 ULONGEST orig_size,
367 CORE_ADDR *jump_entry,
368 CORE_ADDR *trampoline,
369 ULONGEST *trampoline_size,
370 unsigned char *jjump_pad_insn,
371 ULONGEST *jjump_pad_insn_size,
372 CORE_ADDR *adjusted_insn_addr,
373 CORE_ADDR *adjusted_insn_addr_end,
374 char *err) override;
375
376 int get_min_fast_tracepoint_insn_len () override;
377
378 struct emit_ops *emit_ops () override;
379
380 bool supports_disable_randomization () override;
381
382 bool supports_qxfer_libraries_svr4 () override;
383
384 int qxfer_libraries_svr4 (const char *annex,
385 unsigned char *readbuf,
386 unsigned const char *writebuf,
387 CORE_ADDR offset, int len) override;
388
389 bool supports_agent () override;
390
391 #ifdef HAVE_LINUX_BTRACE
392 btrace_target_info *enable_btrace (ptid_t ptid,
393 const btrace_config *conf) override;
394
395 int disable_btrace (btrace_target_info *tinfo) override;
396
397 int read_btrace (btrace_target_info *tinfo, buffer *buf,
398 enum btrace_read_type type) override;
399
400 int read_btrace_conf (const btrace_target_info *tinfo,
401 buffer *buf) override;
402 #endif
403
404 bool supports_range_stepping () override;
405
406 bool supports_pid_to_exec_file () override;
407
408 char *pid_to_exec_file (int pid) override;
409
410 bool supports_multifs () override;
411
412 int multifs_open (int pid, const char *filename, int flags,
413 mode_t mode) override;
414
415 int multifs_unlink (int pid, const char *filename) override;
416
417 ssize_t multifs_readlink (int pid, const char *filename, char *buf,
418 size_t bufsiz) override;
419
420 const char *thread_name (ptid_t thread) override;
421
422 #if USE_THREAD_DB
423 bool thread_handle (ptid_t ptid, gdb_byte **handle,
424 int *handle_len) override;
425 #endif
426
427 bool supports_catch_syscall () override;
428
429 int get_ipa_tdesc_idx () override;
430
431 /* Return the information to access registers. This has public
432 visibility because proc-service uses it. */
433 virtual const regs_info *get_regs_info () = 0;
434
435 private:
436
437 /* Handle a GNU/Linux extended wait response. If we see a clone,
438 fork, or vfork event, we need to add the new LWP to our list
439 (and return 0 so as not to report the trap to higher layers).
440 If we see an exec event, we will modify ORIG_EVENT_LWP to point
441 to a new LWP representing the new program. */
442 int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);
443
444 /* Do low-level handling of the event, and check if we should go on
445 and pass it to caller code. Return the affected lwp if we are, or
446 NULL otherwise. */
447 lwp_info *filter_event (int lwpid, int wstat);
448
449 /* Wait for an event from child(ren) WAIT_PTID, and return any that
450 match FILTER_PTID (leaving others pending). The PTIDs can be:
451 minus_one_ptid, to specify any child; a pid PTID, specifying all
452 lwps of a thread group; or a PTID representing a single lwp. Store
453 the stop status through the status pointer WSTAT. OPTIONS is
454 passed to the waitpid call. Return 0 if no event was found and
455 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
456 was found. Return the PID of the stopped child otherwise. */
457 int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
458 int *wstatp, int options);
459
460 /* Wait for an event from child(ren) PTID. PTIDs can be:
461 minus_one_ptid, to specify any child; a pid PTID, specifying all
462 lwps of a thread group; or a PTID representing a single lwp. Store
463 the stop status through the status pointer WSTAT. OPTIONS is
464 passed to the waitpid call. Return 0 if no event was found and
465 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
466 was found. Return the PID of the stopped child otherwise. */
467 int wait_for_event (ptid_t ptid, int *wstatp, int options);
468
469 /* Wait for all children to stop for the SIGSTOPs we just queued. */
470 void wait_for_sigstop ();
471
472 /* Wait for process, returns status. */
473 ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
474 int target_options);
475
476 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
477 If SUSPEND, then also increase the suspend count of every LWP,
478 except EXCEPT. */
479 void stop_all_lwps (int suspend, lwp_info *except);
480
481 /* Stopped LWPs that the client wanted to be running, that don't have
482 pending statuses, are set to run again, except for EXCEPT, if not
483 NULL. This undoes a stop_all_lwps call. */
484 void unstop_all_lwps (int unsuspend, lwp_info *except);
485
486 /* Start a step-over operation on LWP. When LWP stopped at a
487 breakpoint, to make progress, we need to remove the breakpoint out
488 of the way. If we let other threads run while we do that, they may
489 pass by the breakpoint location and miss hitting it. To avoid
490 that, a step-over momentarily stops all threads while LWP is
491 single-stepped by either hardware or software while the breakpoint
492 is temporarily uninserted from the inferior. When the single-step
493 finishes, we reinsert the breakpoint, and let all threads that are
494 supposed to be running, run again. */
495 void start_step_over (lwp_info *lwp);
496
497 /* If there's a step over in progress, wait until all threads stop
498 (that is, until the stepping thread finishes its step), and
499 unsuspend all lwps. The stepping thread ends with its status
500 pending, which is processed later when we get back to processing
501 events. */
502 void complete_ongoing_step_over ();
503
504 /* When we finish a step-over, set threads running again. If there's
505 another thread that may need a step-over, now's the time to start
506 it. Eventually, we'll move all threads past their breakpoints. */
507 void proceed_all_lwps ();
508
509 /* The reason we resume in the caller, is because we want to be able
510 to pass lwp->status_pending as WSTAT, and we need to clear
511 status_pending_p before resuming, otherwise, resume_one_lwp
512 refuses to resume. */
513 bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);
514
515 /* Move THREAD out of the jump pad. */
516 void move_out_of_jump_pad (thread_info *thread);
517
518 /* Call low_arch_setup on THREAD. */
519 void arch_setup_thread (thread_info *thread);
520
521 #ifdef HAVE_LINUX_USRREGS
522 /* Fetch one register. */
523 void fetch_register (const usrregs_info *usrregs, regcache *regcache,
524 int regno);
525
526 /* Store one register. */
527 void store_register (const usrregs_info *usrregs, regcache *regcache,
528 int regno);
529 #endif
530
531 /* Fetch all registers, or just one, from the child process.
532 If REGNO is -1, do this for all registers, skipping any that are
533 assumed to have been retrieved by regsets_fetch_inferior_registers,
534 unless ALL is non-zero.
535 Otherwise, REGNO specifies which register (so we can save time). */
536 void usr_fetch_inferior_registers (const regs_info *regs_info,
537 regcache *regcache, int regno, int all);
538
539 /* Store our register values back into the inferior.
540 If REGNO is -1, do this for all registers, skipping any that are
541 assumed to have been saved by regsets_store_inferior_registers,
542 unless ALL is non-zero.
543 Otherwise, REGNO specifies which register (so we can save time). */
544 void usr_store_inferior_registers (const regs_info *regs_info,
545 regcache *regcache, int regno, int all);
546
547 /* Return the PC as read from the regcache of LWP, without any
548 adjustment. */
549 CORE_ADDR get_pc (lwp_info *lwp);
550
551 /* Called when the LWP stopped for a signal/trap. If it stopped for a
552 trap check what caused it (breakpoint, watchpoint, trace, etc.),
553 and save the result in the LWP's stop_reason field. If it stopped
554 for a breakpoint, decrement the PC if necessary on the lwp's
555 architecture. Returns true if we now have the LWP's stop PC. */
556 bool save_stop_reason (lwp_info *lwp);
557
558 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
559 SIGNAL is nonzero, give it that signal. */
560 void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
561 siginfo_t *info);
562
563 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
564 disappears while we try to resume it. */
565 void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);
566
567 /* This function is called once per thread. We check the thread's
568 last resume request, which will tell us whether to resume, step, or
569 leave the thread stopped. Any signal the client requested to be
570 delivered has already been enqueued at this point.
571
572 If any thread that GDB wants running is stopped at an internal
573 breakpoint that needs stepping over, we start a step-over operation
574 on that particular thread, and leave all others stopped. */
575 void proceed_one_lwp (thread_info *thread, lwp_info *except);
576
577 /* This function is called once per thread. We check the thread's
578 resume request, which will tell us whether to resume, step, or
579 leave the thread stopped; and what signal, if any, it should be
580 sent.
581
582 For threads which we aren't explicitly told otherwise, we preserve
583 the stepping flag; this is used for stepping over gdbserver-placed
584 breakpoints.
585
586 If pending_flags was set in any thread, we queue any needed
587 signals, since we won't actually resume. We already have a pending
588 event to report, so we don't need to preserve any step requests;
589 they should be re-issued if necessary. */
590 void resume_one_thread (thread_info *thread, bool leave_all_stopped);
591
592 /* Return true if this lwp has an interesting status pending. */
593 bool status_pending_p_callback (thread_info *thread, ptid_t ptid);
594
595 /* Resume LWPs that are currently stopped without any pending status
596 to report, but are resumed from the core's perspective. */
597 void resume_stopped_resumed_lwps (thread_info *thread);
598
599 /* Unsuspend THREAD, except EXCEPT, and proceed. */
600 void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);
601
602 /* Return true if this lwp still has an interesting status pending.
603 If not (e.g., it had stopped for a breakpoint that is gone), return
604 false. */
605 bool thread_still_has_status_pending (thread_info *thread);
606
607 /* Return true if this lwp is to-be-resumed and has an interesting
608 status pending. */
609 bool resume_status_pending (thread_info *thread);
610
611 /* Return true if this lwp that GDB wants running is stopped at an
612 internal breakpoint that we need to step over. It assumes that
613 any required STOP_PC adjustment has already been propagated to
614 the inferior's regcache. */
615 bool thread_needs_step_over (thread_info *thread);
616
617 /* Single step via hardware or software single step.
618 Return 1 if hardware single stepping, 0 if software single stepping
619 or can't single step. */
620 int single_step (lwp_info* lwp);
621
622 /* Install breakpoints for software single stepping. */
623 void install_software_single_step_breakpoints (lwp_info *lwp);
624
625 protected:
626 /* The architecture-specific "low" methods are listed below. */
627
628 /* Architecture-specific setup for the current thread. */
629 virtual void low_arch_setup () = 0;
630
631 /* Return false if we can fetch/store the register, true if we cannot
632 fetch/store the register. */
633 virtual bool low_cannot_fetch_register (int regno) = 0;
634
635 virtual bool low_cannot_store_register (int regno) = 0;
636
637 /* Hook to fetch a register in some non-standard way. Used for
638 example by backends that have read-only registers with hardcoded
639 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
640 REGNO was supplied, false if not, and we should fallback to the
641 standard ptrace methods. */
642 virtual bool low_fetch_register (regcache *regcache, int regno);
643
644 /* Return true if breakpoints are supported. Such targets must
645 implement the GET_PC and SET_PC methods. */
646 virtual bool low_supports_breakpoints ();
647
648 virtual CORE_ADDR low_get_pc (regcache *regcache);
649
650 virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);
651
652 /* Find the next possible PCs after the current instruction executes.
653 Targets that override this method should also override
654 'supports_software_single_step' to return true. */
655 virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);
656
657 /* Return true if there is a breakpoint at PC. */
658 virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;
659
660 /* Breakpoint and watchpoint related functions. See target.h for
661 comments. */
662 virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
663 int size, raw_breakpoint *bp);
664
665 virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
666 int size, raw_breakpoint *bp);
667
668 /* How many bytes the PC should be decremented after a break. */
669 virtual int low_decr_pc_after_break ();
670 };
671
672 extern linux_process_target *the_linux_target;
673
674 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
675 #define get_lwp_thread(lwp) ((lwp)->thread)
676
677 /* This struct is recorded in the target_data field of struct thread_info.
678
679 On linux ``all_threads'' is keyed by the LWP ID, which we use as the
680 GDB protocol representation of the thread ID. Threads also have
681 a "process ID" (poorly named) which is (presently) the same as the
682 LWP ID.
683
684 There is also ``all_processes'' is keyed by the "overall process ID",
685 which GNU/Linux calls tgid, "thread group ID". */
686
687 struct lwp_info
688 {
689 /* Backlink to the parent object. */
690 struct thread_info *thread;
691
692 /* If this flag is set, the next SIGSTOP will be ignored (the
693 process will be immediately resumed). This means that either we
694 sent the SIGSTOP to it ourselves and got some other pending event
695 (so the SIGSTOP is still pending), or that we stopped the
696 inferior implicitly via PTRACE_ATTACH and have not waited for it
697 yet. */
698 int stop_expected;
699
700 /* When this is true, we shall not try to resume this thread, even
701 if last_resume_kind isn't resume_stop. */
702 int suspended;
703
704 /* If this flag is set, the lwp is known to be stopped right now (stop
705 event already received in a wait()). */
706 int stopped;
707
708 /* Signal whether we are in a SYSCALL_ENTRY or
709 in a SYSCALL_RETURN event.
710 Values:
711 - TARGET_WAITKIND_SYSCALL_ENTRY
712 - TARGET_WAITKIND_SYSCALL_RETURN */
713 enum target_waitkind syscall_state;
714
715 /* When stopped is set, the last wait status recorded for this lwp. */
716 int last_status;
717
718 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
719 this LWP's last event, to pass to GDB without any further
720 processing. This is used to store extended ptrace event
721 information or exit status until it can be reported to GDB. */
722 struct target_waitstatus waitstatus;
723
724 /* A pointer to the fork child/parent relative. Valid only while
725 the parent fork event is not reported to higher layers. Used to
726 avoid wildcard vCont actions resuming a fork child before GDB is
727 notified about the parent's fork event. */
728 struct lwp_info *fork_relative;
729
730 /* When stopped is set, this is where the lwp last stopped, with
731 decr_pc_after_break already accounted for. If the LWP is
732 running, this is the address at which the lwp was resumed. */
733 CORE_ADDR stop_pc;
734
735 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
736 been reported. */
737 int status_pending_p;
738 int status_pending;
739
740 /* The reason the LWP last stopped, if we need to track it
741 (breakpoint, watchpoint, etc.) */
742 enum target_stop_reason stop_reason;
743
744 /* On architectures where it is possible to know the data address of
745 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
746 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
747 is true. */
748 CORE_ADDR stopped_data_address;
749
750 /* If this is non-zero, it is a breakpoint to be reinserted at our next
751 stop (SIGTRAP stops only). */
752 CORE_ADDR bp_reinsert;
753
754 /* If this flag is set, the last continue operation at the ptrace
755 level on this process was a single-step. */
756 int stepping;
757
758 /* Range to single step within. This is a copy of the step range
759 passed along the last resume request. See 'struct
760 thread_resume'. */
761 CORE_ADDR step_range_start; /* Inclusive */
762 CORE_ADDR step_range_end; /* Exclusive */
763
764 /* If this flag is set, we need to set the event request flags the
765 next time we see this LWP stop. */
766 int must_set_ptrace_flags;
767
768 /* If this is non-zero, it points to a chain of signals which need to
769 be delivered to this process. */
770 struct pending_signals *pending_signals;
771
772 /* A link used when resuming. It is initialized from the resume request,
773 and then processed and cleared in linux_resume_one_lwp. */
774 struct thread_resume *resume;
775
776 /* Information bout this lwp's fast tracepoint collection status (is it
777 currently stopped in the jump pad, and if so, before or at/after the
778 relocated instruction). Normally, we won't care about this, but we will
779 if a signal arrives to this lwp while it is collecting. */
780 fast_tpoint_collect_result collecting_fast_tracepoint;
781
782 /* If this is non-zero, it points to a chain of signals which need
783 to be reported to GDB. These were deferred because the thread
784 was doing a fast tracepoint collect when they arrived. */
785 struct pending_signals *pending_signals_to_report;
786
787 /* When collecting_fast_tracepoint is first found to be 1, we insert
788 a exit-jump-pad-quickly breakpoint. This is it. */
789 struct breakpoint *exit_jump_pad_bkpt;
790
791 #ifdef USE_THREAD_DB
792 int thread_known;
793 /* The thread handle, used for e.g. TLS access. Only valid if
794 THREAD_KNOWN is set. */
795 td_thrhandle_t th;
796
797 /* The pthread_t handle. */
798 thread_t thread_handle;
799 #endif
800
801 /* Arch-specific additions. */
802 struct arch_lwp_info *arch_private;
803 };
804
805 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
806
807 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
808 errno). */
809 int linux_attach_lwp (ptid_t ptid);
810
811 struct lwp_info *find_lwp_pid (ptid_t ptid);
812 /* For linux_stop_lwp see nat/linux-nat.h. */
813
814 #ifdef HAVE_LINUX_REGSETS
815 void initialize_regsets_info (struct regsets_info *regsets_info);
816 #endif
817
818 void initialize_low_arch (void);
819
820 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
821 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
822
823 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
824 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
825
826 /* From thread-db.c */
827 int thread_db_init (void);
828 void thread_db_detach (struct process_info *);
829 void thread_db_mourn (struct process_info *);
830 int thread_db_handle_monitor_command (char *);
831 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
832 CORE_ADDR load_module, CORE_ADDR *address);
833 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
834
835 /* Called from linux-low.c when a clone event is detected. Upon entry,
836 both the clone and the parent should be stopped. This function does
837 whatever is required have the clone under thread_db's control. */
838
839 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
840
841 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
842
843 extern int have_ptrace_getregset;
844
845 /* Search for the value with type MATCH in the auxv vector with
846 entries of length WORDSIZE bytes. If found, store the value in
847 *VALP and return 1. If not found or if there is an error, return
848 0. */
849
850 int linux_get_auxv (int wordsize, CORE_ADDR match,
851 CORE_ADDR *valp);
852
853 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
854 WORDSIZE. If no entry was found, return zero. */
855
856 CORE_ADDR linux_get_hwcap (int wordsize);
857
858 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
859 WORDSIZE. If no entry was found, return zero. */
860
861 CORE_ADDR linux_get_hwcap2 (int wordsize);
862
863 #endif /* GDBSERVER_LINUX_LOW_H */
This page took 0.069515 seconds and 4 git commands to generate.