/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002, 2004-2005, 2007-2012 Free Software Foundation,
   Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifdef HAVE_THREAD_DB_H
#include <thread_db.h>
#endif

#include "gdbthread.h"
#include "gdb_proc_service.h"

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long
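
/* For illustration only: the generic Linux code funnels its ptrace
   arguments through the types above, so a word-sized memory read ends up
   looking roughly like the sketch below (not a verbatim quote from
   linux-low.c):

     PTRACE_XFER_TYPE word
       = ptrace (PTRACE_PEEKTEXT, pid,
                 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);

   i.e. addresses travel as PTRACE_ARG3_TYPE/PTRACE_ARG4_TYPE and data is
   transferred in PTRACE_XFER_TYPE-sized units.  */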

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);

/* The kinds of register sets a regset entry can describe.  */
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS
};

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
extern struct regset_info target_regsets[];
#endif
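
/* For illustration only: a port that defines HAVE_LINUX_REGSETS is
   expected to provide the target_regsets table, conventionally terminated
   by a sentinel entry whose size field is -1.  A hypothetical port might
   write something like the sketch below; the my_* helpers and the exact
   sizes are made up, not taken from any real linux-*-low.c:

     static void my_fill_gregset (struct regcache *regcache, void *buf);
     static void my_store_gregset (struct regcache *regcache,
                                   const void *buf);

     struct regset_info target_regsets[] =
       {
         { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
           GENERAL_REGS, my_fill_gregset, my_store_gregset },
         { 0, 0, 0, -1, -1, NULL, NULL }    (end-of-list marker)
       };
*/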

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct linux_target_ops
{
  /* Architecture-specific setup.  */
  void (*arch_setup) (void);

  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;
  int (*cannot_fetch_register) (int);

  /* Returns 0 if we can store the register, 1 if we can not
     store the register, and 2 if failure to store the register
     is acceptable.  */
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  int (*fetch_register) (struct regcache *regcache, int regno);

  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);
  const unsigned char *breakpoint;
  int breakpoint_len;
  CORE_ADDR (*breakpoint_reinsert_addr) (void);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*insert_point) (char type, CORE_ADDR addr, int len);
  int (*remove_point) (char type, CORE_ADDR addr, int len);
  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
                                   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
                                  int regno, const char *buf);
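
  /* For illustration only: on a port where some registers are narrower
     than PTRACE_XFER_TYPE, a backend might zero the transfer word before
     letting the regcache fill in the register bytes, so that the
     PTRACE_POKEUSER done by the generic code writes a well-defined value.
     A minimal little-endian-style sketch, using the real collect_register
     from regcache.h but a made-up my_collect_ptrace_register:

       static void
       my_collect_ptrace_register (struct regcache *regcache,
                                   int regno, char *buf)
       {
         memset (buf, 0, sizeof (PTRACE_XFER_TYPE));
         collect_register (regcache, regno, buf);
       }
  */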

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, void *inf, int direction);
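
  /* For illustration only: a port whose inferior siginfo layout differs
     from the siginfo_t gdbserver itself was built with could hook in
     roughly like this; the my_* converters are hypothetical:

       static int
       my_siginfo_fixup (siginfo_t *native, void *inf, int direction)
       {
         if (direction == 1)
           my_native_from_inferior (native, inf);    (INF -> NATIVE)
         else
           my_inferior_from_native (inf, native);    (NATIVE -> INF)
         return 1;                                   (conversion done)
       }

     When the hook is absent or returns 0, the generic code just copies
     the siginfo verbatim.  */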

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  struct arch_lwp_info * (*new_thread) (void);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (const char *);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad)
    (CORE_ADDR tpoint, CORE_ADDR tpaddr,
     CORE_ADDR collector, CORE_ADDR lockaddr,
     ULONGEST orig_size,
     CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn,
     ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr,
     CORE_ADDR *adjusted_insn_addr_end,
     char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);
};

extern struct linux_target_ops the_low_target;
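
/* For illustration only: each port (linux-<arch>-low.c) defines its own
   the_low_target, filling in the hooks in declaration order and leaving
   optional ones NULL; the generic code in linux-low.c checks optional
   hooks for NULL before calling them.  A hypothetical minimal port might
   begin like this, where the my_* names are made up:

     struct linux_target_ops the_low_target =
       {
         my_arch_setup,
         NULL,                       (regset_bitmap)
         my_cannot_fetch_register,
         my_cannot_store_register,
         NULL,                       (fetch_register)
         my_get_pc,
         my_set_pc,
         my_breakpoint,              (breakpoint)
         sizeof (my_breakpoint),     (breakpoint_len)
         ...
       };
*/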

#define ptid_of(proc) ((proc)->head.id)
#define pid_of(proc) ptid_get_pid ((proc)->head.id)
#define lwpid_of(proc) ptid_get_lwp ((proc)->head.id)

#define get_lwp(inf) ((struct lwp_info *)(inf))
#define get_thread_lwp(thr) (get_lwp (inferior_target_data (thr)))
#define get_lwp_thread(proc) ((struct thread_info *) \
                              find_inferior_id (&all_threads, \
                                                get_lwp (proc)->head.id))
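
/* For illustration only, given a struct lwp_info *lwp:

     struct thread_info *thread = get_lwp_thread (lwp);
     unsigned long lwpid = lwpid_of (lwp);
     struct lwp_info *same_lwp = get_thread_lwp (thread);

   i.e. these macros map between an lwp_info, the thread_info that owns
   it, and the ptid stored in the lwp's inferior_list_entry head.  */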

struct lwp_info
{
  struct inferior_list_entry head;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* If this flag is set, the lwp is known to be dead already (exit
     event already received in a wait(), and is cached in
     status_pending).  */
  int dead;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* When stopped is set, this is where the lwp stopped, with
     decr_pc_after_break already accounted for.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* STOPPED_BY_WATCHPOINT is non-zero if this LWP stopped with a data
     watchpoint trap.  */
  int stopped_by_watchpoint;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* True if it is known that this lwp is presently collecting a fast
     tracepoint (it is in the jump pad or in some code that will
     return to the jump pad).  Normally, we won't care about this, but
     we will if a signal arrives to this lwp while it is
     collecting.  */
  int collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

  /* True if the LWP was seen to stop at an internal breakpoint and needs
     stepping over later when it is resumed.  */
  int need_step_over;

  /* Non-zero if the thread handle below is known to be valid.  */
  int thread_known;

#ifdef HAVE_THREAD_DB_H
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

extern struct inferior_list all_lwps;

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

void linux_attach_lwp (unsigned long pid);
struct lwp_info *find_lwp_pid (ptid_t ptid);
void linux_stop_lwp (struct lwp_info *lwp);

/* From thread-db.c  */
int thread_db_init (int use_events);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);