gdbserver/linux-low: turn 'breakpoint_at' into a method
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
21
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
24 #include <signal.h>
25
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
28
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
33
34 #define PTRACE_XFER_TYPE long
35
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func) (struct regcache *, void *);
38 typedef void (*regset_store_func) (struct regcache *, const void *);
39 enum regset_type {
40 GENERAL_REGS,
41 FP_REGS,
42 EXTENDED_REGS,
43 OPTIONAL_REGS, /* Do not error if the regset cannot be accessed. */
44 };
45
46 /* The arch's regsets array initializer must be terminated with a NULL
47 regset. */
48 #define NULL_REGSET \
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
50
51 struct regset_info
52 {
53 int get_request, set_request;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
56 int nt_type;
57 int size;
58 enum regset_type type;
59 regset_fill_func fill_function;
60 regset_store_func store_function;
61 };
62
63 /* Aggregation of all the supported regsets of a given
64 architecture/mode. */
65
66 struct regsets_info
67 {
68 /* The regsets array. */
69 struct regset_info *regsets;
70
71 /* The number of regsets in the REGSETS array. */
72 int num_regsets;
73
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets;
80 };
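/* For illustration: a minimal sketch of how a low target might
   describe its regsets, assuming a hypothetical architecture.  The
   "example_" names and the gregset type are placeholders; the array
   must be terminated with NULL_REGSET as noted above, and num_regsets
   is typically filled in later by initialize_regsets_info.  */
#if 0
static void example_fill_gregset (struct regcache *regcache, void *buf);
static void example_store_gregset (struct regcache *regcache, const void *buf);

static struct regset_info example_regsets[] =
  {
    /* get_request, set_request, nt_type, size, type, fill, store.  */
    { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (example_gregset_t),
      GENERAL_REGS, example_fill_gregset, example_store_gregset },
    NULL_REGSET
  };

static struct regsets_info example_regsets_info =
  {
    example_regsets,	/* regsets */
    0,			/* num_regsets */
    NULL,		/* disabled_regsets */
  };
#endif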
81
82 #endif
83
84 /* Mapping between the general-purpose registers in `struct user'
85 format and GDB's register array layout. */
86
87 struct usrregs_info
88 {
89 /* The number of registers accessible. */
90 int num_regs;
91
92 /* The registers map. */
93 int *regmap;
94 };
95
96 /* All info needed to access an architecture/mode's registers. */
97
98 struct regs_info
99 {
100 /* Regset support bitmap: 1 for registers that are transferred as a part
101 of a regset, 0 for ones that need to be handled individually. This
102 can be NULL if all registers are transferred with regsets or regsets
103 are not supported. */
104 unsigned char *regset_bitmap;
105
106 /* Info used when accessing registers with PTRACE_PEEKUSER /
107 PTRACE_POKEUSER. This can be NULL if all registers are
108 transferred with regsets. */
109 struct usrregs_info *usrregs;
110
111 #ifdef HAVE_LINUX_REGSETS
112 /* Info used when accessing registers with regsets. */
113 struct regsets_info *regsets_info;
114 #endif
115 };
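/* For illustration: a sketch of how the structures above fit together
   for a hypothetical 16-register architecture accessed with
   PTRACE_PEEKUSER/POKEUSER.  The offsets and "example_" names are
   placeholders; example_regsets_info refers to the sketch shown
   earlier.  */
#if 0
/* Offset of each GDB register within `struct user'; -1 would mark a
   register that cannot be accessed this way.  */
static int example_regmap[] =
  {
    0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120
  };

static struct usrregs_info example_usrregs_info =
  {
    16,			/* num_regs */
    example_regmap,	/* regmap */
  };

static struct regs_info example_regs_info =
  {
    NULL,		/* regset_bitmap */
    &example_usrregs_info,
#ifdef HAVE_LINUX_REGSETS
    &example_regsets_info,
#endif
  };
#endif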
116
117 struct process_info_private
118 {
119 /* Arch-specific additions. */
120 struct arch_process_info *arch_private;
121
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db *thread_db;
125
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
127 CORE_ADDR r_debug;
128 };
129
130 struct lwp_info;
131
132 struct linux_target_ops
133 {
134 /* Breakpoint and watchpoint related functions. See target.h for
135 comments. */
136 int (*supports_z_point_type) (char z_type);
137 int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
138 int size, struct raw_breakpoint *bp);
139 int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
140 int size, struct raw_breakpoint *bp);
141
142 int (*stopped_by_watchpoint) (void);
143 CORE_ADDR (*stopped_data_address) (void);
144
145 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
146 for registers smaller than an xfer unit). */
147 void (*collect_ptrace_register) (struct regcache *regcache,
148 int regno, char *buf);
149 void (*supply_ptrace_register) (struct regcache *regcache,
150 int regno, const char *buf);
151
152 /* Hook to convert from target format to ptrace format and back.
153 Returns true if any conversion was done; false otherwise.
154 If DIRECTION is 1, then copy from INF to NATIVE.
155 If DIRECTION is 0, copy from NATIVE to INF. */
156 int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);
157
158 /* Hook to call when a new process is created or attached to.
159 If extra per-process architecture-specific data is needed,
160 allocate it here. */
161 struct arch_process_info * (*new_process) (void);
162
163 /* Hook to call when a process is being deleted. If extra per-process
164 architecture-specific data is needed, delete it here. */
165 void (*delete_process) (struct arch_process_info *info);
166
167 /* Hook to call when a new thread is detected.
168 If extra per-thread architecture-specific data is needed,
169 allocate it here. */
170 void (*new_thread) (struct lwp_info *);
171
172 /* Hook to call when a thread is being deleted. If extra per-thread
173 architecture-specific data is needed, delete it here. */
174 void (*delete_thread) (struct arch_lwp_info *);
175
176 /* Hook to call, if any, when a new fork is attached. */
177 void (*new_fork) (struct process_info *parent, struct process_info *child);
178
179 /* Hook to call prior to resuming a thread. */
180 void (*prepare_to_resume) (struct lwp_info *);
181
182 /* Hook to support target specific qSupported. */
183 void (*process_qsupported) (char **, int count);
184
185 /* Returns true if the low target supports tracepoints. */
186 int (*supports_tracepoints) (void);
187
188 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
189 success, -1 on failure. */
190 int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);
191
192 /* Install a fast tracepoint jump pad. See target.h for
193 comments. */
194 int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
195 CORE_ADDR collector,
196 CORE_ADDR lockaddr,
197 ULONGEST orig_size,
198 CORE_ADDR *jump_entry,
199 CORE_ADDR *trampoline,
200 ULONGEST *trampoline_size,
201 unsigned char *jjump_pad_insn,
202 ULONGEST *jjump_pad_insn_size,
203 CORE_ADDR *adjusted_insn_addr,
204 CORE_ADDR *adjusted_insn_addr_end,
205 char *err);
206
207 /* Return the bytecode operations vector for the current inferior.
208 Returns NULL if bytecode compilation is not supported. */
209 struct emit_ops *(*emit_ops) (void);
210
211 /* Return the minimum length of an instruction that can be safely overwritten
212 for use as a fast tracepoint. */
213 int (*get_min_fast_tracepoint_insn_len) (void);
214
215 /* Returns true if the low target supports range stepping. */
216 int (*supports_range_stepping) (void);
217
218 /* See target.h. */
219 int (*supports_hardware_single_step) (void);
220
221 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
222 the inferior is stopped due to SYSCALL_SIGTRAP. */
223 void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);
224
225 /* See target.h. */
226 int (*get_ipa_tdesc_idx) (void);
227 };
228
229 extern struct linux_target_ops the_low_target;
230
231 /* Target ops definitions for a Linux target. */
232
233 class linux_process_target : public process_stratum_target
234 {
235 public:
236
237 int create_inferior (const char *program,
238 const std::vector<char *> &program_args) override;
239
240 void post_create_inferior () override;
241
242 int attach (unsigned long pid) override;
243
244 int kill (process_info *proc) override;
245
246 int detach (process_info *proc) override;
247
248 void mourn (process_info *proc) override;
249
250 void join (int pid) override;
251
252 bool thread_alive (ptid_t pid) override;
253
254 void resume (thread_resume *resume_info, size_t n) override;
255
256 ptid_t wait (ptid_t ptid, target_waitstatus *status,
257 int options) override;
258
259 void fetch_registers (regcache *regcache, int regno) override;
260
261 void store_registers (regcache *regcache, int regno) override;
262
263 int prepare_to_access_memory () override;
264
265 void done_accessing_memory () override;
266
267 int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
268 int len) override;
269
270 int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
271 int len) override;
272
273 void look_up_symbols () override;
274
275 void request_interrupt () override;
276
277 bool supports_read_auxv () override;
278
279 int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
280 unsigned int len) override;
281
282 bool supports_z_point_type (char z_type) override;
283
284 int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
285 int size, raw_breakpoint *bp) override;
286
287 int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
288 int size, raw_breakpoint *bp) override;
289
290 bool stopped_by_sw_breakpoint () override;
291
292 bool supports_stopped_by_sw_breakpoint () override;
293
294 bool stopped_by_hw_breakpoint () override;
295
296 bool supports_stopped_by_hw_breakpoint () override;
297
298 bool supports_hardware_single_step () override;
299
300 bool stopped_by_watchpoint () override;
301
302 CORE_ADDR stopped_data_address () override;
303
304 bool supports_read_offsets () override;
305
306 int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;
307
308 bool supports_get_tls_address () override;
309
310 int get_tls_address (thread_info *thread, CORE_ADDR offset,
311 CORE_ADDR load_module, CORE_ADDR *address) override;
312
313 bool supports_qxfer_osdata () override;
314
315 int qxfer_osdata (const char *annex, unsigned char *readbuf,
316 unsigned const char *writebuf,
317 CORE_ADDR offset, int len) override;
318
319 bool supports_qxfer_siginfo () override;
320
321 int qxfer_siginfo (const char *annex, unsigned char *readbuf,
322 unsigned const char *writebuf,
323 CORE_ADDR offset, int len) override;
324
325 bool supports_non_stop () override;
326
327 bool async (bool enable) override;
328
329 int start_non_stop (bool enable) override;
330
331 bool supports_multi_process () override;
332
333 bool supports_fork_events () override;
334
335 bool supports_vfork_events () override;
336
337 bool supports_exec_events () override;
338
339 void handle_new_gdb_connection () override;
340
341 int handle_monitor_command (char *mon) override;
342
343 int core_of_thread (ptid_t ptid) override;
344
345 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
346 bool supports_read_loadmap () override;
347
348 int read_loadmap (const char *annex, CORE_ADDR offset,
349 unsigned char *myaddr, unsigned int len) override;
350 #endif
351
352 void process_qsupported (char **features, int count) override;
353
354 bool supports_tracepoints () override;
355
356 CORE_ADDR read_pc (regcache *regcache) override;
357
358 void write_pc (regcache *regcache, CORE_ADDR pc) override;
359
360 bool supports_thread_stopped () override;
361
362 bool thread_stopped (thread_info *thread) override;
363
364 void pause_all (bool freeze) override;
365
366 void unpause_all (bool unfreeze) override;
367
368 void stabilize_threads () override;
369
370 bool supports_fast_tracepoints () override;
371
372 int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
373 CORE_ADDR tpaddr,
374 CORE_ADDR collector,
375 CORE_ADDR lockaddr,
376 ULONGEST orig_size,
377 CORE_ADDR *jump_entry,
378 CORE_ADDR *trampoline,
379 ULONGEST *trampoline_size,
380 unsigned char *jjump_pad_insn,
381 ULONGEST *jjump_pad_insn_size,
382 CORE_ADDR *adjusted_insn_addr,
383 CORE_ADDR *adjusted_insn_addr_end,
384 char *err) override;
385
386 int get_min_fast_tracepoint_insn_len () override;
387
388 struct emit_ops *emit_ops () override;
389
390 bool supports_disable_randomization () override;
391
392 bool supports_qxfer_libraries_svr4 () override;
393
394 int qxfer_libraries_svr4 (const char *annex,
395 unsigned char *readbuf,
396 unsigned const char *writebuf,
397 CORE_ADDR offset, int len) override;
398
399 bool supports_agent () override;
400
401 #ifdef HAVE_LINUX_BTRACE
402 btrace_target_info *enable_btrace (ptid_t ptid,
403 const btrace_config *conf) override;
404
405 int disable_btrace (btrace_target_info *tinfo) override;
406
407 int read_btrace (btrace_target_info *tinfo, buffer *buf,
408 enum btrace_read_type type) override;
409
410 int read_btrace_conf (const btrace_target_info *tinfo,
411 buffer *buf) override;
412 #endif
413
414 bool supports_range_stepping () override;
415
416 bool supports_pid_to_exec_file () override;
417
418 char *pid_to_exec_file (int pid) override;
419
420 bool supports_multifs () override;
421
422 int multifs_open (int pid, const char *filename, int flags,
423 mode_t mode) override;
424
425 int multifs_unlink (int pid, const char *filename) override;
426
427 ssize_t multifs_readlink (int pid, const char *filename, char *buf,
428 size_t bufsiz) override;
429
430 const char *thread_name (ptid_t thread) override;
431
432 #if USE_THREAD_DB
433 bool thread_handle (ptid_t ptid, gdb_byte **handle,
434 int *handle_len) override;
435 #endif
436
437 bool supports_catch_syscall () override;
438
439 int get_ipa_tdesc_idx () override;
440
441 /* Return the information needed to access registers. This has public
442 visibility because proc-service uses it. */
443 virtual const regs_info *get_regs_info () = 0;
444
445 private:
446
447 /* Handle a GNU/Linux extended wait response. If we see a clone,
448 fork, or vfork event, we need to add the new LWP to our list
449 (and return 0 so as not to report the trap to higher layers).
450 If we see an exec event, we will modify ORIG_EVENT_LWP to point
451 to a new LWP representing the new program. */
452 int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);
453
454 /* Do low-level handling of the event, and check if we should go on
455 and pass it to caller code. Return the affected lwp if so, or
456 NULL otherwise. */
457 lwp_info *filter_event (int lwpid, int wstat);
458
459 /* Wait for an event from child(ren) WAIT_PTID, and return any that
460 match FILTER_PTID (leaving others pending). The PTIDs can be:
461 minus_one_ptid, to specify any child; a pid PTID, specifying all
462 lwps of a thread group; or a PTID representing a single lwp. Store
463 the stop status through the status pointer WSTAT. OPTIONS is
464 passed to the waitpid call. Return 0 if no event was found and
465 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
466 were found. Return the PID of the stopped child otherwise.
467 int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
468 int *wstatp, int options);
469
470 /* Wait for an event from child(ren) PTID. PTIDs can be:
471 minus_one_ptid, to specify any child; a pid PTID, specifying all
472 lwps of a thread group; or a PTID representing a single lwp. Store
473 the stop status through the status pointer WSTAT. OPTIONS is
474 passed to the waitpid call. Return 0 if no event was found and
475 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
476 were found. Return the PID of the stopped child otherwise.
477 int wait_for_event (ptid_t ptid, int *wstatp, int options);
478
479 /* Wait for all children to stop for the SIGSTOPs we just queued. */
480 void wait_for_sigstop ();
481
482 /* Wait for process, returns status. */
483 ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
484 int target_options);
485
486 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
487 If SUSPEND, then also increase the suspend count of every LWP,
488 except EXCEPT. */
489 void stop_all_lwps (int suspend, lwp_info *except);
490
491 /* Stopped LWPs that the client wanted to be running and that don't have
492 pending statuses, are set to run again, except for EXCEPT, if not
493 NULL. This undoes a stop_all_lwps call. */
494 void unstop_all_lwps (int unsuspend, lwp_info *except);
495
496 /* Start a step-over operation on LWP. When LWP is stopped at a
497 breakpoint, to make progress, we need to move the breakpoint out
498 of the way. If we let other threads run while we do that, they may
499 pass by the breakpoint location and miss hitting it. To avoid
500 that, a step-over momentarily stops all threads while LWP is
501 single-stepped by either hardware or software while the breakpoint
502 is temporarily uninserted from the inferior. When the single-step
503 finishes, we reinsert the breakpoint, and let all threads that are
504 supposed to be running, run again. */
505 void start_step_over (lwp_info *lwp);
506
507 /* If there's a step over in progress, wait until all threads stop
508 (that is, until the stepping thread finishes its step), and
509 unsuspend all lwps. The stepping thread ends with its status
510 pending, which is processed later when we get back to processing
511 events. */
512 void complete_ongoing_step_over ();
513
514 /* When we finish a step-over, set threads running again. If there's
515 another thread that may need a step-over, now's the time to start
516 it. Eventually, we'll move all threads past their breakpoints. */
517 void proceed_all_lwps ();
518
519 /* The reason we resume in the caller is that we want to be able
520 to pass lwp->status_pending as WSTAT, and we need to clear
521 status_pending_p before resuming; otherwise, resume_one_lwp
522 refuses to resume. */
523 bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);
524
525 /* Move THREAD out of the jump pad. */
526 void move_out_of_jump_pad (thread_info *thread);
527
528 /* Call low_arch_setup on THREAD. */
529 void arch_setup_thread (thread_info *thread);
530
531 #ifdef HAVE_LINUX_USRREGS
532 /* Fetch one register. */
533 void fetch_register (const usrregs_info *usrregs, regcache *regcache,
534 int regno);
535
536 /* Store one register. */
537 void store_register (const usrregs_info *usrregs, regcache *regcache,
538 int regno);
539 #endif
540
541 /* Fetch all registers, or just one, from the child process.
542 If REGNO is -1, do this for all registers, skipping any that are
543 assumed to have been retrieved by regsets_fetch_inferior_registers,
544 unless ALL is non-zero.
545 Otherwise, REGNO specifies which register (so we can save time). */
546 void usr_fetch_inferior_registers (const regs_info *regs_info,
547 regcache *regcache, int regno, int all);
548
549 /* Store our register values back into the inferior.
550 If REGNO is -1, do this for all registers, skipping any that are
551 assumed to have been saved by regsets_store_inferior_registers,
552 unless ALL is non-zero.
553 Otherwise, REGNO specifies which register (so we can save time). */
554 void usr_store_inferior_registers (const regs_info *regs_info,
555 regcache *regcache, int regno, int all);
556
557 /* Return the PC as read from the regcache of LWP, without any
558 adjustment. */
559 CORE_ADDR get_pc (lwp_info *lwp);
560
561 /* Called when the LWP stopped for a signal/trap. If it stopped for a
562 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
563 and save the result in the LWP's stop_reason field. If it stopped
564 for a breakpoint, decrement the PC if necessary on the lwp's
565 architecture. Returns true if we now have the LWP's stop PC. */
566 bool save_stop_reason (lwp_info *lwp);
567
568 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
569 SIGNAL is nonzero, give it that signal. */
570 void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
571 siginfo_t *info);
572
573 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
574 disappears while we try to resume it. */
575 void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);
576
577 /* This function is called once per thread. We check the thread's
578 last resume request, which will tell us whether to resume, step, or
579 leave the thread stopped. Any signal the client requested to be
580 delivered has already been enqueued at this point.
581
582 If any thread that GDB wants running is stopped at an internal
583 breakpoint that needs stepping over, we start a step-over operation
584 on that particular thread, and leave all others stopped. */
585 void proceed_one_lwp (thread_info *thread, lwp_info *except);
586
587 /* This function is called once per thread. We check the thread's
588 resume request, which will tell us whether to resume, step, or
589 leave the thread stopped; and what signal, if any, it should be
590 sent.
591
592 For threads for which we aren't explicitly told otherwise, we preserve
593 the stepping flag; this is used for stepping over gdbserver-placed
594 breakpoints.
595
596 If pending_flags was set in any thread, we queue any needed
597 signals, since we won't actually resume. We already have a pending
598 event to report, so we don't need to preserve any step requests;
599 they should be re-issued if necessary. */
600 void resume_one_thread (thread_info *thread, bool leave_all_stopped);
601
602 /* Return true if this lwp has an interesting status pending. */
603 bool status_pending_p_callback (thread_info *thread, ptid_t ptid);
604
605 /* Resume LWPs that are currently stopped without any pending status
606 to report, but are resumed from the core's perspective. */
607 void resume_stopped_resumed_lwps (thread_info *thread);
608
609 /* Unsuspend THREAD, except EXCEPT, and proceed. */
610 void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);
611
612 /* Return true if this lwp still has an interesting status pending.
613 If not (e.g., it had stopped for a breakpoint that is gone), return
614 false. */
615 bool thread_still_has_status_pending (thread_info *thread);
616
617 /* Return true if this lwp is to-be-resumed and has an interesting
618 status pending. */
619 bool resume_status_pending (thread_info *thread);
620
621 /* Return true if this lwp that GDB wants running is stopped at an
622 internal breakpoint that we need to step over. It assumes that
623 any required STOP_PC adjustment has already been propagated to
624 the inferior's regcache. */
625 bool thread_needs_step_over (thread_info *thread);
626
627 /* Single step via hardware or software single step.
628 Return 1 if hardware single stepping, 0 if software single stepping
629 or if we can't single step. */
630 int single_step (lwp_info* lwp);
631
632 /* Install breakpoints for software single stepping. */
633 void install_software_single_step_breakpoints (lwp_info *lwp);
634
635 protected:
636 /* The architecture-specific "low" methods are listed below. */
637
638 /* Architecture-specific setup for the current thread. */
639 virtual void low_arch_setup () = 0;
640
641 /* Return false if we can fetch/store the register, true if we cannot
642 fetch/store the register. */
643 virtual bool low_cannot_fetch_register (int regno) = 0;
644
645 virtual bool low_cannot_store_register (int regno) = 0;
646
647 /* Hook to fetch a register in some non-standard way. Used for
648 example by backends that have read-only registers with hardcoded
649 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
650 REGNO was supplied, false if not, and we should fall back to the
651 standard ptrace methods. */
652 virtual bool low_fetch_register (regcache *regcache, int regno);
653
654 /* Return true if breakpoints are supported. Such targets must
655 implement the GET_PC and SET_PC methods. */
656 virtual bool low_supports_breakpoints ();
657
658 virtual CORE_ADDR low_get_pc (regcache *regcache);
659
660 virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);
661
662 /* Find the next possible PCs after the current instruction executes.
663 Targets that override this method should also override
664 'supports_software_single_step' to return true. */
665 virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);
666
667 /* Return true if there is a breakpoint at PC. */
668 virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;
669
670 /* How many bytes the PC should be decremented after a break. */
671 virtual int low_decr_pc_after_break ();
672 };
673
674 extern linux_process_target *the_linux_target;
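/* For illustration: a minimal sketch of an architecture-specific
   target, assuming a hypothetical arch.  A real low target derives
   from linux_process_target and implements at least the pure virtual
   "low" methods above; every "example_" name and the 2-byte
   breakpoint encoding below are made up.  */
#if 0
/* Hypothetical software breakpoint instruction for this fake arch.  */
static const unsigned char example_breakpoint_insn[] = { 0xde, 0xad };

class example_arch_target : public linux_process_target
{
public:
  const regs_info *get_regs_info () override
  { return &example_regs_info; }	/* See the earlier sketch.  */

protected:
  void low_arch_setup () override;	/* See the sketch further below.  */

  bool low_cannot_fetch_register (int regno) override
  { return false; }

  bool low_cannot_store_register (int regno) override
  { return false; }

  bool low_supports_breakpoints () override
  { return true; }

  CORE_ADDR low_get_pc (regcache *regcache) override
  { return linux_get_pc_64bit (regcache); }

  void low_set_pc (regcache *regcache, CORE_ADDR pc) override
  { linux_set_pc_64bit (regcache, pc); }

  /* This is the method that the old 'breakpoint_at' hook became:
     report whether the breakpoint instruction is present at PC.  */
  bool low_breakpoint_at (CORE_ADDR pc) override
  {
    unsigned char insn[2];

    read_memory (pc, insn, sizeof insn);
    return (insn[0] == example_breakpoint_insn[0]
	    && insn[1] == example_breakpoint_insn[1]);
  }
};

/* The arch's .cc file defines a single instance and points
   the_linux_target at it.  */
static example_arch_target the_example_arch_target;
#endif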
675
676 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
677 #define get_lwp_thread(lwp) ((lwp)->thread)
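/* For illustration: the two macros above convert between the generic
   thread_info and the Linux-specific lwp_info views of the same
   thread.  A usage sketch (current_thread is gdbserver's
   current-thread global):  */
#if 0
  struct lwp_info *lwp = get_thread_lwp (current_thread);
  struct thread_info *thr = get_lwp_thread (lwp);	/* And back again.  */
#endif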
678
679 /* This struct is recorded in the target_data field of struct thread_info.
680
681 On Linux, ``all_threads'' is keyed by the LWP ID, which we use as the
682 GDB protocol representation of the thread ID. Threads also have
683 a "process ID" (poorly named) which is (presently) the same as the
684 LWP ID.
685
686 There is also ``all_processes'', which is keyed by the "overall process ID",
687 which GNU/Linux calls tgid, "thread group ID". */
688
689 struct lwp_info
690 {
691 /* Backlink to the parent object. */
692 struct thread_info *thread;
693
694 /* If this flag is set, the next SIGSTOP will be ignored (the
695 process will be immediately resumed). This means that either we
696 sent the SIGSTOP to it ourselves and got some other pending event
697 (so the SIGSTOP is still pending), or that we stopped the
698 inferior implicitly via PTRACE_ATTACH and have not waited for it
699 yet. */
700 int stop_expected;
701
702 /* When this is true, we shall not try to resume this thread, even
703 if last_resume_kind isn't resume_stop. */
704 int suspended;
705
706 /* If this flag is set, the lwp is known to be stopped right now (stop
707 event already received in a wait()). */
708 int stopped;
709
710 /* Indicates whether we are in a SYSCALL_ENTRY or
711 in a SYSCALL_RETURN event.
712 Values:
713 - TARGET_WAITKIND_SYSCALL_ENTRY
714 - TARGET_WAITKIND_SYSCALL_RETURN */
715 enum target_waitkind syscall_state;
716
717 /* When stopped is set, the last wait status recorded for this lwp. */
718 int last_status;
719
720 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
721 this LWP's last event, to pass to GDB without any further
722 processing. This is used to store extended ptrace event
723 information or exit status until it can be reported to GDB. */
724 struct target_waitstatus waitstatus;
725
726 /* A pointer to the fork child/parent relative. Valid only while
727 the parent fork event is not reported to higher layers. Used to
728 avoid wildcard vCont actions resuming a fork child before GDB is
729 notified about the parent's fork event. */
730 struct lwp_info *fork_relative;
731
732 /* When stopped is set, this is where the lwp last stopped, with
733 decr_pc_after_break already accounted for. If the LWP is
734 running, this is the address at which the lwp was resumed. */
735 CORE_ADDR stop_pc;
736
737 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
738 been reported. */
739 int status_pending_p;
740 int status_pending;
741
742 /* The reason the LWP last stopped, if we need to track it
743 (breakpoint, watchpoint, etc.) */
744 enum target_stop_reason stop_reason;
745
746 /* On architectures where it is possible to know the data address of
747 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
748 contains that data address. Only valid if STOPPED_BY_WATCHPOINT
749 is true. */
750 CORE_ADDR stopped_data_address;
751
752 /* If this is non-zero, it is a breakpoint to be reinserted at our next
753 stop (SIGTRAP stops only). */
754 CORE_ADDR bp_reinsert;
755
756 /* If this flag is set, the last continue operation at the ptrace
757 level on this process was a single-step. */
758 int stepping;
759
760 /* Range to single step within. This is a copy of the step range
761 passed along the last resume request. See 'struct
762 thread_resume'. */
763 CORE_ADDR step_range_start; /* Inclusive */
764 CORE_ADDR step_range_end; /* Exclusive */
765
766 /* If this flag is set, we need to set the event request flags the
767 next time we see this LWP stop. */
768 int must_set_ptrace_flags;
769
770 /* If this is non-zero, it points to a chain of signals which need to
771 be delivered to this process. */
772 struct pending_signals *pending_signals;
773
774 /* A link used when resuming. It is initialized from the resume request,
775 and then processed and cleared in linux_resume_one_lwp. */
776 struct thread_resume *resume;
777
778 /* Information about this lwp's fast tracepoint collection status (is it
779 currently stopped in the jump pad, and if so, before or at/after the
780 relocated instruction). Normally, we won't care about this, but we will
781 if a signal arrives to this lwp while it is collecting. */
782 fast_tpoint_collect_result collecting_fast_tracepoint;
783
784 /* If this is non-zero, it points to a chain of signals which need
785 to be reported to GDB. These were deferred because the thread
786 was doing a fast tracepoint collect when they arrived. */
787 struct pending_signals *pending_signals_to_report;
788
789 /* When collecting_fast_tracepoint is first found to be 1, we insert
790 an exit-jump-pad-quickly breakpoint. This is it. */
791 struct breakpoint *exit_jump_pad_bkpt;
792
793 #ifdef USE_THREAD_DB
794 int thread_known;
795 /* The thread handle, used for e.g. TLS access. Only valid if
796 THREAD_KNOWN is set. */
797 td_thrhandle_t th;
798
799 /* The pthread_t handle. */
800 thread_t thread_handle;
801 #endif
802
803 /* Arch-specific additions. */
804 struct arch_lwp_info *arch_private;
805 };
806
807 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
808
809 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
810 errno). */
811 int linux_attach_lwp (ptid_t ptid);
812
813 struct lwp_info *find_lwp_pid (ptid_t ptid);
814 /* For linux_stop_lwp see nat/linux-nat.h. */
815
816 #ifdef HAVE_LINUX_REGSETS
817 void initialize_regsets_info (struct regsets_info *regsets_info);
818 #endif
819
820 void initialize_low_arch (void);
821
822 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
823 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
824
825 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
826 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
827
828 /* From thread-db.c */
829 int thread_db_init (void);
830 void thread_db_detach (struct process_info *);
831 void thread_db_mourn (struct process_info *);
832 int thread_db_handle_monitor_command (char *);
833 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
834 CORE_ADDR load_module, CORE_ADDR *address);
835 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
836
837 /* Called from linux-low.c when a clone event is detected. Upon entry,
838 both the clone and the parent should be stopped. This function does
839 whatever is required to have the clone under thread_db's control. */
840
841 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
842
843 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
844
845 extern int have_ptrace_getregset;
846
847 /* Search for the value with type MATCH in the auxv vector with
848 entries of length WORDSIZE bytes. If found, store the value in
849 *VALP and return 1. If not found or if there is an error, return
850 0. */
851
852 int linux_get_auxv (int wordsize, CORE_ADDR match,
853 CORE_ADDR *valp);
854
855 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are
856 WORDSIZE bytes long. If no entry was found, return zero. */
857
858 CORE_ADDR linux_get_hwcap (int wordsize);
859
860 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are
861 WORDSIZE bytes long. If no entry was found, return zero. */
862
863 CORE_ADDR linux_get_hwcap2 (int wordsize);
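/* For illustration: a sketch of how a low target might use the auxv
   helpers above from its low_arch_setup, e.g. to pick a target
   description.  The HWCAP bit, the tdesc reader and the fixed word
   size are hypothetical; the current_process ()->tdesc assignment
   follows the usual gdbserver pattern.  */
#if 0
void
example_arch_target::low_arch_setup ()
{
  const int wordsize = 8;	/* Pretend the inferior is 64-bit.  */

  /* Convenience wrapper around the auxv search...  */
  CORE_ADDR hwcap = linux_get_hwcap (wordsize);

  /* ...equivalent to looking up AT_HWCAP directly.  */
  CORE_ADDR hwcap_raw = 0;
  linux_get_auxv (wordsize, AT_HWCAP, &hwcap_raw);

  bool have_ext = (hwcap & EXAMPLE_HWCAP_EXT) != 0;
  current_process ()->tdesc = example_read_description (have_ext);
}
#endif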
864
865 #endif /* GDBSERVER_LINUX_LOW_H */