/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002, 2004-2005, 2007-2012 Free Software Foundation,
   Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20 #ifdef HAVE_THREAD_DB_H
21 #include <thread_db.h>
25 #include "gdb_proc_service.h"
27 #define PTRACE_ARG3_TYPE void *
28 #define PTRACE_ARG4_TYPE void *
29 #define PTRACE_XFER_TYPE long
#ifdef HAVE_LINUX_REGSETS

/* Callback that copies registers out of a regcache into the raw
   regset buffer handed to ptrace.  */
typedef void (*regset_fill_func) (struct regcache *, void *);

/* Callback that copies registers from a raw regset buffer back into a
   regcache.  */
typedef void (*regset_store_func) (struct regcache *, const void *);

/* NOTE(review): the enum declaration below was missing from the
   garbled original; member names reconstructed from upstream
   gdbserver -- verify before relying on them.  */
enum regset_type
{
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
};

/* Description of one ptrace regset: how to fetch/store it, and how to
   translate between its wire format and the regcache.  */
struct regset_info
{
  /* ptrace requests used to read and write this regset
     (e.g. PTRACE_GETREGS / PTRACE_SETREGS).  */
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  /* Size in bytes of the raw regset buffer.  NOTE(review):
     reconstructed field; declaration line was missing.  */
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};

/* NULL-request-terminated table defined by each architecture port.  */
extern struct regset_info target_regsets[];

#endif /* HAVE_LINUX_REGSETS */
54 struct process_info_private
56 /* Arch-specific additions. */
57 struct arch_process_info
*arch_private
;
59 /* libthread_db-specific additions. Not NULL if this process has loaded
60 thread_db, and it is active. */
61 struct thread_db
*thread_db
;
63 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
69 struct linux_target_ops
71 /* Architecture-specific setup. */
72 void (*arch_setup
) (void);
77 /* Regset support bitmap: 1 for registers that are transferred as a part
78 of a regset, 0 for ones that need to be handled individually. This
79 can be NULL if all registers are transferred with regsets or regsets
81 unsigned char *regset_bitmap
;
82 int (*cannot_fetch_register
) (int);
84 /* Returns 0 if we can store the register, 1 if we can not
85 store the register, and 2 if failure to store the register
87 int (*cannot_store_register
) (int);
89 /* Hook to fetch a register in some non-standard way. Used for
90 example by backends that have read-only registers with hardcoded
91 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
92 REGNO was supplied, false if not, and we should fallback to the
93 standard ptrace methods. */
94 int (*fetch_register
) (struct regcache
*regcache
, int regno
);
96 CORE_ADDR (*get_pc
) (struct regcache
*regcache
);
97 void (*set_pc
) (struct regcache
*regcache
, CORE_ADDR newpc
);
98 const unsigned char *breakpoint
;
100 CORE_ADDR (*breakpoint_reinsert_addr
) (void);
102 int decr_pc_after_break
;
103 int (*breakpoint_at
) (CORE_ADDR pc
);
105 /* Breakpoint and watchpoint related functions. See target.h for
107 int (*insert_point
) (char type
, CORE_ADDR addr
, int len
);
108 int (*remove_point
) (char type
, CORE_ADDR addr
, int len
);
109 int (*stopped_by_watchpoint
) (void);
110 CORE_ADDR (*stopped_data_address
) (void);
112 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
113 for registers smaller than an xfer unit). */
114 void (*collect_ptrace_register
) (struct regcache
*regcache
,
115 int regno
, char *buf
);
116 void (*supply_ptrace_register
) (struct regcache
*regcache
,
117 int regno
, const char *buf
);
119 /* Hook to convert from target format to ptrace format and back.
120 Returns true if any conversion was done; false otherwise.
121 If DIRECTION is 1, then copy from INF to NATIVE.
122 If DIRECTION is 0, copy from NATIVE to INF. */
123 int (*siginfo_fixup
) (siginfo_t
*native
, void *inf
, int direction
);
125 /* Hook to call when a new process is created or attached to.
126 If extra per-process architecture-specific data is needed,
128 struct arch_process_info
* (*new_process
) (void);
130 /* Hook to call when a new thread is detected.
131 If extra per-thread architecture-specific data is needed,
133 struct arch_lwp_info
* (*new_thread
) (void);
135 /* Hook to call prior to resuming a thread. */
136 void (*prepare_to_resume
) (struct lwp_info
*);
138 /* Hook to support target specific qSupported. */
139 void (*process_qsupported
) (const char *);
141 /* Returns true if the low target supports tracepoints. */
142 int (*supports_tracepoints
) (void);
144 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
145 success, -1 on failure. */
146 int (*get_thread_area
) (int lwpid
, CORE_ADDR
*addrp
);
148 /* Install a fast tracepoint jump pad. See target.h for
150 int (*install_fast_tracepoint_jump_pad
) (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
154 CORE_ADDR
*jump_entry
,
155 CORE_ADDR
*trampoline
,
156 ULONGEST
*trampoline_size
,
157 unsigned char *jjump_pad_insn
,
158 ULONGEST
*jjump_pad_insn_size
,
159 CORE_ADDR
*adjusted_insn_addr
,
160 CORE_ADDR
*adjusted_insn_addr_end
,
163 /* Return the bytecode operations vector for the current inferior.
164 Returns NULL if bytecode compilation is not supported. */
165 struct emit_ops
*(*emit_ops
) (void);
167 /* Return the minimum length of an instruction that can be safely overwritten
168 for use as a fast tracepoint. */
169 int (*get_min_fast_tracepoint_insn_len
) (void);
173 extern struct linux_target_ops the_low_target
;
/* Convenience accessors: PROC is anything whose first member is a
   struct inferior_list_entry named `head' (processes, threads, LWPs);
   its ptid is stored there.  */
#define ptid_of(proc) ((proc)->head.id)
#define pid_of(proc) ptid_get_pid ((proc)->head.id)
#define lwpid_of(proc) ptid_get_lwp ((proc)->head.id)

/* Convert between the generic inferior list entries and the
   linux-specific lwp_info, and from an LWP back to the thread_info
   that owns it (looked up by ptid in the global thread list).  */
#define get_lwp(inf) ((struct lwp_info *)(inf))
#define get_thread_lwp(thr) (get_lwp (inferior_target_data (thr)))
#define get_lwp_thread(proc) ((struct thread_info *) \
			      find_inferior_id (&all_threads, \
						get_lwp (proc)->head.id))
187 struct inferior_list_entry head
;
189 /* If this flag is set, the next SIGSTOP will be ignored (the
190 process will be immediately resumed). This means that either we
191 sent the SIGSTOP to it ourselves and got some other pending event
192 (so the SIGSTOP is still pending), or that we stopped the
193 inferior implicitly via PTRACE_ATTACH and have not waited for it
197 /* When this is true, we shall not try to resume this thread, even
198 if last_resume_kind isn't resume_stop. */
201 /* If this flag is set, the lwp is known to be stopped right now (stop
202 event already received in a wait()). */
205 /* If this flag is set, the lwp is known to be dead already (exit
206 event already received in a wait(), and is cached in
210 /* When stopped is set, the last wait status recorded for this lwp. */
213 /* When stopped is set, this is where the lwp stopped, with
214 decr_pc_after_break already accounted for. */
217 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
219 int status_pending_p
;
222 /* STOPPED_BY_WATCHPOINT is non-zero if this LWP stopped with a data
224 int stopped_by_watchpoint
;
226 /* On architectures where it is possible to know the data address of
227 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
228 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
230 CORE_ADDR stopped_data_address
;
232 /* If this is non-zero, it is a breakpoint to be reinserted at our next
233 stop (SIGTRAP stops only). */
234 CORE_ADDR bp_reinsert
;
236 /* If this flag is set, the last continue operation at the ptrace
237 level on this process was a single-step. */
240 /* If this flag is set, we need to set the event request flags the
241 next time we see this LWP stop. */
242 int must_set_ptrace_flags
;
244 /* If this is non-zero, it points to a chain of signals which need to
245 be delivered to this process. */
246 struct pending_signals
*pending_signals
;
248 /* A link used when resuming. It is initialized from the resume request,
249 and then processed and cleared in linux_resume_one_lwp. */
250 struct thread_resume
*resume
;
252 /* True if it is known that this lwp is presently collecting a fast
253 tracepoint (it is in the jump pad or in some code that will
254 return to the jump pad. Normally, we won't care about this, but
255 we will if a signal arrives to this lwp while it is
257 int collecting_fast_tracepoint
;
259 /* If this is non-zero, it points to a chain of signals which need
260 to be reported to GDB. These were deferred because the thread
261 was doing a fast tracepoint collect when they arrived. */
262 struct pending_signals
*pending_signals_to_report
;
264 /* When collecting_fast_tracepoint is first found to be 1, we insert
265 a exit-jump-pad-quickly breakpoint. This is it. */
266 struct breakpoint
*exit_jump_pad_bkpt
;
268 /* True if the LWP was seen stop at an internal breakpoint and needs
269 stepping over later when it is resumed. */
273 #ifdef HAVE_THREAD_DB_H
274 /* The thread handle, used for e.g. TLS access. Only valid if
275 THREAD_KNOWN is set. */
279 /* Arch-specific additions. */
280 struct arch_lwp_info
*arch_private
;
283 extern struct inferior_list all_lwps
;
285 int linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
);
287 void linux_attach_lwp (unsigned long pid
);
288 struct lwp_info
*find_lwp_pid (ptid_t ptid
);
289 void linux_stop_lwp (struct lwp_info
*lwp
);
291 /* From thread-db.c */
292 int thread_db_init (int use_events
);
293 void thread_db_detach (struct process_info
*);
294 void thread_db_mourn (struct process_info
*);
295 int thread_db_handle_monitor_command (char *);
296 int thread_db_get_tls_address (struct thread_info
*thread
, CORE_ADDR offset
,
297 CORE_ADDR load_module
, CORE_ADDR
*address
);
298 int thread_db_look_up_one_symbol (const char *name
, CORE_ADDR
*addrp
);