Remove support for thread events without PTRACE_EVENT_CLONE in GDBServer.
gdb/gdbserver/linux-low.h
/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
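
/* As an illustration of how the declarations above fit together, a
   hypothetical backend's regsets array might look like the sketch
   below.  The xyz_* names and the request/size choices are made up
   for this example; the only fixed requirement is the NULL_REGSET
   terminator.

     static void xyz_fill_gregset (struct regcache *regcache, void *buf);
     static void xyz_store_gregset (struct regcache *regcache,
                                    const void *buf);

     static struct regset_info xyz_regsets[] =
       {
         { PTRACE_GETREGS, PTRACE_SETREGS, 0,
           sizeof (struct user_regs_struct), GENERAL_REGS,
           xyz_fill_gregset, xyz_store_gregset },
         NULL_REGSET
       };
*/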

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
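
/* A minimal, hypothetical sketch of how a backend might assemble a
   struct regs_info from the pieces above, for a configuration that
   transfers every register individually with PTRACE_PEEKUSER /
   PTRACE_POKEUSER (regsets_info left out for brevity).  The xyz_*
   names, register count and regmap contents are illustrative only;
   regmap holds the ptrace offset for each GDB register number.

     static int xyz_regmap[] = { 0, 4, 8, 12 };
     static struct usrregs_info xyz_usrregs_info = { 4, xyz_regmap };
     static struct regs_info xyz_regs_info = { NULL, &xyz_usrregs_info };

     static const struct regs_info *
     xyz_regs_info_fn (void)
     {
       return &xyz_regs_info;
     }
*/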

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Architecture-specific setup.  */
  void (*arch_setup) (void);

  const struct regs_info *(*regs_info) (void);
  int (*cannot_fetch_register) (int);

  /* Returns 0 if we can store the register, 1 if we cannot store the
     register, and 2 if failure to store the register is acceptable.  */
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, in which case we should fall
     back to the standard ptrace methods.  */
  int (*fetch_register) (struct regcache *regcache, int regno);

  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);

  /* See target.h for details.  */
  int (*breakpoint_kind_from_pc) (CORE_ADDR *pcptr);

  /* See target.h for details.  */
  const gdb_byte *(*sw_breakpoint_from_kind) (int kind, int *size);

  CORE_ADDR (*breakpoint_reinsert_addr) (void);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*supports_z_point_type) (char z_type);
  int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);
  int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
                       int size, struct raw_breakpoint *bp);

  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
                                   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
                                  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (siginfo_t *native, void *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  void (*new_thread) (struct lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  void (*new_fork) (struct process_info *parent, struct process_info *child);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (char **, int count);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           CORE_ADDR *trampoline,
                                           ULONGEST *trampoline_size,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end,
                                           char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely
     overwritten for use as a fast tracepoint.  */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping.  */
  int (*supports_range_stepping) (void);

  /* See target.h.  */
  int (*breakpoint_kind_from_current_state) (CORE_ADDR *pcptr);

  /* See target.h.  */
  int (*supports_hardware_single_step) (void);
};

extern struct linux_target_ops the_low_target;
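
/* A purely illustrative, hypothetical sketch (the xyz_* functions are
   made up): each architecture backend defines the_low_target in its
   own linux-*-low.c, filling in the hooks it supports in declaration
   order and using NULL for optional hooks it does not provide, e.g.

     struct linux_target_ops the_low_target =
       {
         xyz_arch_setup,
         xyz_regs_info,
         xyz_cannot_fetch_register,
         xyz_cannot_store_register,
         NULL,
         xyz_get_pc,
         xyz_set_pc,
         ...
       };
*/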

#define get_thread_lwp(thr) ((struct lwp_info *) (inferior_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
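
/* Usage sketch (hypothetical; THREAD stands for any struct thread_info
   pointer): the two macros above convert between the two views of the
   same LWP, so the round trip gets back where it started:

     struct lwp_info *lwp = get_thread_lwp (thread);
     struct thread_info *same_thread = get_lwp_thread (lwp);
*/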

/* This struct is recorded in the target_data field of struct thread_info.

   On Linux, ``all_threads'' is keyed by the LWP ID, which we use as
   the GDB protocol representation of the thread ID.  Threads also
   have a "process ID" (poorly named) which is (presently) the same as
   the LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls the tgid, "thread group ID".  */

struct lwp_info
{
  /* Backlink to the parent object.  */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.)  */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOP_REASON is
     TARGET_STOPPED_BY_WATCHPOINT.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start;  /* Inclusive */
  CORE_ADDR step_range_end;    /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* True if it is known that this lwp is presently collecting a fast
     tracepoint (it is in the jump pad or in some code that will
     return to the jump pad).  Normally, we won't care about this, but
     we will if a signal arrives to this lwp while it is
     collecting.  */
  int collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

  /* True if the LWP was seen to stop at an internal breakpoint and
     needs stepping over later when it is resumed.  */
  int need_step_over;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

extern int have_ptrace_getregset;