lwp_info: Make the arch code free arch_lwp_info
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.h
1 /* Internal interfaces for the GNU/Linux specific target code for gdbserver.
2 Copyright (C) 2002-2017 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "nat/linux-nat.h"
20 #include "nat/gdb_thread_db.h"
21 #include <signal.h>
22
23 #include "gdbthread.h"
24 #include "gdb_proc_service.h"
25
26 /* Included for ptrace type definitions. */
27 #include "nat/linux-ptrace.h"
28 #include "target/waitstatus.h" /* For enum target_stop_reason. */
29 #include "tracepoint.h"
30
31 #define PTRACE_XFER_TYPE long
32
33 #ifdef HAVE_LINUX_REGSETS
34 typedef void (*regset_fill_func) (struct regcache *, void *);
35 typedef void (*regset_store_func) (struct regcache *, const void *);
36 enum regset_type {
37 GENERAL_REGS,
38 FP_REGS,
39 EXTENDED_REGS,
40 };
41
42 /* The arch's regsets array initializer must be terminated with a NULL
43 regset. */
44 #define NULL_REGSET \
45 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
46
47 struct regset_info
48 {
49 int get_request, set_request;
50 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
51 argument and the 4th argument should be "const struct iovec *". */
52 int nt_type;
53 int size;
54 enum regset_type type;
55 regset_fill_func fill_function;
56 regset_store_func store_function;
57 };
58
59 /* Aggregation of all the supported regsets of a given
60 architecture/mode. */
61
62 struct regsets_info
63 {
64 /* The regsets array. */
65 struct regset_info *regsets;
66
67 /* The number of regsets in the REGSETS array. */
68 int num_regsets;
69
70 /* If we get EIO on a regset, do not try it again. Note the set of
71 supported regsets may depend on processor mode on biarch
72 machines. This is a (lazily allocated) array holding one boolean
73 byte (0/1) per regset, with each element corresponding to the
74 regset in the REGSETS array above at the same offset. */
75 char *disabled_regsets;
76 };
77
78 #endif
79
80 /* Mapping between the general-purpose registers in `struct user'
81 format and GDB's register array layout. */
82
83 struct usrregs_info
84 {
85 /* The number of registers accessible. */
86 int num_regs;
87
88 /* The registers map. */
89 int *regmap;
90 };
91
92 /* All info needed to access an architecture/mode's registers. */
93
94 struct regs_info
95 {
96 /* Regset support bitmap: 1 for registers that are transferred as a part
97 of a regset, 0 for ones that need to be handled individually. This
98 can be NULL if all registers are transferred with regsets or regsets
99 are not supported. */
100 unsigned char *regset_bitmap;
101
102 /* Info used when accessing registers with PTRACE_PEEKUSER /
103 PTRACE_POKEUSER. This can be NULL if all registers are
104 transferred with regsets .*/
105 struct usrregs_info *usrregs;
106
107 #ifdef HAVE_LINUX_REGSETS
108 /* Info used when accessing registers with regsets. */
109 struct regsets_info *regsets_info;
110 #endif
111 };
112
113 struct process_info_private
114 {
115 /* Arch-specific additions. */
116 struct arch_process_info *arch_private;
117
118 /* libthread_db-specific additions. Not NULL if this process has loaded
119 thread_db, and it is active. */
120 struct thread_db *thread_db;
121
122 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
123 CORE_ADDR r_debug;
124 };
125
126 struct lwp_info;
127
128 struct linux_target_ops
129 {
130 /* Architecture-specific setup. */
131 void (*arch_setup) (void);
132
133 const struct regs_info *(*regs_info) (void);
134 int (*cannot_fetch_register) (int);
135
136 /* Returns 0 if we can store the register, 1 if we can not
137 store the register, and 2 if failure to store the register
138 is acceptable. */
139 int (*cannot_store_register) (int);
140
141 /* Hook to fetch a register in some non-standard way. Used for
142 example by backends that have read-only registers with hardcoded
143 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
144 REGNO was supplied, false if not, and we should fallback to the
145 standard ptrace methods. */
146 int (*fetch_register) (struct regcache *regcache, int regno);
147
148 CORE_ADDR (*get_pc) (struct regcache *regcache);
149 void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);
150
151 /* See target.h for details. */
152 int (*breakpoint_kind_from_pc) (CORE_ADDR *pcptr);
153
154 /* See target.h for details. */
155 const gdb_byte *(*sw_breakpoint_from_kind) (int kind, int *size);
156
157 /* Find the next possible PCs after the current instruction executes. */
158 std::vector<CORE_ADDR> (*get_next_pcs) (struct regcache *regcache);
159
160 int decr_pc_after_break;
161 int (*breakpoint_at) (CORE_ADDR pc);
162
163 /* Breakpoint and watchpoint related functions. See target.h for
164 comments. */
165 int (*supports_z_point_type) (char z_type);
166 int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
167 int size, struct raw_breakpoint *bp);
168 int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
169 int size, struct raw_breakpoint *bp);
170
171 int (*stopped_by_watchpoint) (void);
172 CORE_ADDR (*stopped_data_address) (void);
173
174 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
175 for registers smaller than an xfer unit). */
176 void (*collect_ptrace_register) (struct regcache *regcache,
177 int regno, char *buf);
178 void (*supply_ptrace_register) (struct regcache *regcache,
179 int regno, const char *buf);
180
181 /* Hook to convert from target format to ptrace format and back.
182 Returns true if any conversion was done; false otherwise.
183 If DIRECTION is 1, then copy from INF to NATIVE.
184 If DIRECTION is 0, copy from NATIVE to INF. */
185 int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);
186
187 /* Hook to call when a new process is created or attached to.
188 If extra per-process architecture-specific data is needed,
189 allocate it here. */
190 struct arch_process_info * (*new_process) (void);
191
192 /* Hook to call when a new thread is detected.
193 If extra per-thread architecture-specific data is needed,
194 allocate it here. */
195 void (*new_thread) (struct lwp_info *);
196
197 /* Hook to call when a thread is being deleted. If extra per-thread
198 architecture-specific data is needed, delete it here. */
199 void (*delete_thread) (struct arch_lwp_info *);
200
201 /* Hook to call, if any, when a new fork is attached. */
202 void (*new_fork) (struct process_info *parent, struct process_info *child);
203
204 /* Hook to call prior to resuming a thread. */
205 void (*prepare_to_resume) (struct lwp_info *);
206
207 /* Hook to support target specific qSupported. */
208 void (*process_qsupported) (char **, int count);
209
210 /* Returns true if the low target supports tracepoints. */
211 int (*supports_tracepoints) (void);
212
213 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
214 success, -1 on failure. */
215 int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);
216
217 /* Install a fast tracepoint jump pad. See target.h for
218 comments. */
219 int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
220 CORE_ADDR collector,
221 CORE_ADDR lockaddr,
222 ULONGEST orig_size,
223 CORE_ADDR *jump_entry,
224 CORE_ADDR *trampoline,
225 ULONGEST *trampoline_size,
226 unsigned char *jjump_pad_insn,
227 ULONGEST *jjump_pad_insn_size,
228 CORE_ADDR *adjusted_insn_addr,
229 CORE_ADDR *adjusted_insn_addr_end,
230 char *err);
231
232 /* Return the bytecode operations vector for the current inferior.
233 Returns NULL if bytecode compilation is not supported. */
234 struct emit_ops *(*emit_ops) (void);
235
236 /* Return the minimum length of an instruction that can be safely overwritten
237 for use as a fast tracepoint. */
238 int (*get_min_fast_tracepoint_insn_len) (void);
239
240 /* Returns true if the low target supports range stepping. */
241 int (*supports_range_stepping) (void);
242
243 /* See target.h. */
244 int (*breakpoint_kind_from_current_state) (CORE_ADDR *pcptr);
245
246 /* See target.h. */
247 int (*supports_hardware_single_step) (void);
248
249 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
250 inferior is stopped due to SYSCALL_SIGTRAP. */
251 void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);
252
253 /* See target.h. */
254 int (*get_ipa_tdesc_idx) (void);
255 };
256
257 extern struct linux_target_ops the_low_target;
258
259 #define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
260 #define get_lwp_thread(lwp) ((lwp)->thread)
261
262 /* This struct is recorded in the target_data field of struct thread_info.
263
264 On linux ``all_threads'' is keyed by the LWP ID, which we use as the
265 GDB protocol representation of the thread ID. Threads also have
266 a "process ID" (poorly named) which is (presently) the same as the
267 LWP ID.
268
269 There is also ``all_processes'' is keyed by the "overall process ID",
270 which GNU/Linux calls tgid, "thread group ID". */
271
272 struct lwp_info
273 {
274 /* Backlink to the parent object. */
275 struct thread_info *thread;
276
277 /* If this flag is set, the next SIGSTOP will be ignored (the
278 process will be immediately resumed). This means that either we
279 sent the SIGSTOP to it ourselves and got some other pending event
280 (so the SIGSTOP is still pending), or that we stopped the
281 inferior implicitly via PTRACE_ATTACH and have not waited for it
282 yet. */
283 int stop_expected;
284
285 /* When this is true, we shall not try to resume this thread, even
286 if last_resume_kind isn't resume_stop. */
287 int suspended;
288
289 /* If this flag is set, the lwp is known to be stopped right now (stop
290 event already received in a wait()). */
291 int stopped;
292
293 /* Signal whether we are in a SYSCALL_ENTRY or
294 in a SYSCALL_RETURN event.
295 Values:
296 - TARGET_WAITKIND_SYSCALL_ENTRY
297 - TARGET_WAITKIND_SYSCALL_RETURN */
298 enum target_waitkind syscall_state;
299
300 /* When stopped is set, the last wait status recorded for this lwp. */
301 int last_status;
302
303 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
304 this LWP's last event, to pass to GDB without any further
305 processing. This is used to store extended ptrace event
306 information or exit status until it can be reported to GDB. */
307 struct target_waitstatus waitstatus;
308
309 /* A pointer to the fork child/parent relative. Valid only while
310 the parent fork event is not reported to higher layers. Used to
311 avoid wildcard vCont actions resuming a fork child before GDB is
312 notified about the parent's fork event. */
313 struct lwp_info *fork_relative;
314
315 /* When stopped is set, this is where the lwp last stopped, with
316 decr_pc_after_break already accounted for. If the LWP is
317 running, this is the address at which the lwp was resumed. */
318 CORE_ADDR stop_pc;
319
320 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
321 been reported. */
322 int status_pending_p;
323 int status_pending;
324
325 /* The reason the LWP last stopped, if we need to track it
326 (breakpoint, watchpoint, etc.) */
327 enum target_stop_reason stop_reason;
328
329 /* On architectures where it is possible to know the data address of
330 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
331 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
332 is true. */
333 CORE_ADDR stopped_data_address;
334
335 /* If this is non-zero, it is a breakpoint to be reinserted at our next
336 stop (SIGTRAP stops only). */
337 CORE_ADDR bp_reinsert;
338
339 /* If this flag is set, the last continue operation at the ptrace
340 level on this process was a single-step. */
341 int stepping;
342
343 /* Range to single step within. This is a copy of the step range
344 passed along the last resume request. See 'struct
345 thread_resume'. */
346 CORE_ADDR step_range_start; /* Inclusive */
347 CORE_ADDR step_range_end; /* Exclusive */
348
349 /* If this flag is set, we need to set the event request flags the
350 next time we see this LWP stop. */
351 int must_set_ptrace_flags;
352
353 /* If this is non-zero, it points to a chain of signals which need to
354 be delivered to this process. */
355 struct pending_signals *pending_signals;
356
357 /* A link used when resuming. It is initialized from the resume request,
358 and then processed and cleared in linux_resume_one_lwp. */
359 struct thread_resume *resume;
360
361 /* Information bout this lwp's fast tracepoint collection status (is it
362 currently stopped in the jump pad, and if so, before or at/after the
363 relocated instruction). Normally, we won't care about this, but we will
364 if a signal arrives to this lwp while it is collecting. */
365 fast_tpoint_collect_result collecting_fast_tracepoint;
366
367 /* If this is non-zero, it points to a chain of signals which need
368 to be reported to GDB. These were deferred because the thread
369 was doing a fast tracepoint collect when they arrived. */
370 struct pending_signals *pending_signals_to_report;
371
372 /* When collecting_fast_tracepoint is first found to be 1, we insert
373 a exit-jump-pad-quickly breakpoint. This is it. */
374 struct breakpoint *exit_jump_pad_bkpt;
375
376 #ifdef USE_THREAD_DB
377 int thread_known;
378 /* The thread handle, used for e.g. TLS access. Only valid if
379 THREAD_KNOWN is set. */
380 td_thrhandle_t th;
381
382 /* The pthread_t handle. */
383 thread_t thread_handle;
384 #endif
385
386 /* Arch-specific additions. */
387 struct arch_lwp_info *arch_private;
388 };
389
390 int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);
391
392 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
393 errno). */
394 int linux_attach_lwp (ptid_t ptid);
395
396 struct lwp_info *find_lwp_pid (ptid_t ptid);
397 /* For linux_stop_lwp see nat/linux-nat.h. */
398
399 #ifdef HAVE_LINUX_REGSETS
400 void initialize_regsets_info (struct regsets_info *regsets_info);
401 #endif
402
403 void initialize_low_arch (void);
404
405 void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
406 CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);
407
408 void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
409 CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);
410
411 /* From thread-db.c */
412 int thread_db_init (void);
413 void thread_db_detach (struct process_info *);
414 void thread_db_mourn (struct process_info *);
415 int thread_db_handle_monitor_command (char *);
416 int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
417 CORE_ADDR load_module, CORE_ADDR *address);
418 int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
419
420 /* Called from linux-low.c when a clone event is detected. Upon entry,
421 both the clone and the parent should be stopped. This function does
422 whatever is required have the clone under thread_db's control. */
423
424 void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);
425
426 bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);
427
428 extern int have_ptrace_getregset;
This page took 0.039563 seconds and 4 git commands to generate.