[PowerPC] Reject tdescs with VSX and no FPU or Altivec
binutils-gdb.git / gdb/gdbserver/linux-low.h
/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2018 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions. */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason. */
#include "tracepoint.h"

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset. */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *". */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
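
/* For illustration only: a hypothetical low target's regsets array
   might look like the sketch below, with the array terminated by
   NULL_REGSET as required above.  The "hyp_" fill/store callbacks are
   placeholders, not functions defined anywhere in gdbserver.  */
#if 0
static void hyp_fill_gregset (struct regcache *regcache, void *buf);
static void hyp_store_gregset (struct regcache *regcache, const void *buf);

static struct regset_info hyp_regsets[] =
  {
    /* get_request, set_request, nt_type, size, type, fill, store.  */
    { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
      GENERAL_REGS, hyp_fill_gregset, hyp_store_gregset },
    NULL_REGSET
  };
#endif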

/* Aggregation of all the supported regsets of a given
   architecture/mode. */

struct regsets_info
{
  /* The regsets array. */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array. */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again. Note the set of
     supported regsets may depend on processor mode on biarch
     machines. This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset. */
  char *disabled_regsets;
};
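
/* Illustrative sketch only: tying the hypothetical hyp_regsets array
   above to a regsets_info.  NUM_REGSETS and DISABLED_REGSETS are left
   zero/NULL here; initialize_regsets_info (declared further down)
   fills them in.  */
#if 0
static struct regsets_info hyp_regsets_info =
  {
    hyp_regsets,	/* regsets */
    0,			/* num_regsets */
    NULL,		/* disabled_regsets */
  };
#endif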

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout. */

struct usrregs_info
{
  /* The number of registers accessible. */
  int num_regs;

  /* The registers map. */
  int *regmap;
};
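
/* For illustration only: a hypothetical regmap giving, for each GDB
   register number, its byte offset within `struct user', with -1 for
   registers that cannot be accessed that way.  The offsets shown are
   made up.  */
#if 0
static int hyp_regmap[] =
  {
    0 * 8, 1 * 8, 2 * 8, 3 * 8,		/* r0..r3 */
    -1,					/* a register not in `struct user' */
  };

static struct usrregs_info hyp_usrregs_info =
  {
    sizeof (hyp_regmap) / sizeof (hyp_regmap[0]),	/* num_regs */
    hyp_regmap,						/* regmap */
  };
#endif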

/* All info needed to access an architecture/mode's registers. */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually. This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported. */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER. This can be NULL if all registers are
     transferred with regsets. */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets. */
  struct regsets_info *regsets_info;
#endif
};
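
/* Illustrative sketch only: a regs_info combining the hypothetical
   pieces above, as a low target's regs_info hook could return it.
   The bitmap is left NULL on the assumption that all registers are
   handled by regsets.  */
#if 0
static struct regs_info hyp_regs_info =
  {
    NULL,		/* regset_bitmap */
    &hyp_usrregs_info,	/* usrregs */
#ifdef HAVE_LINUX_REGSETS
    &hyp_regsets_info,	/* regsets_info */
#endif
  };

static const struct regs_info *
hyp_regs_info_hook (void)
{
  return &hyp_regs_info;
}
#endif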

struct process_info_private
{
  /* Arch-specific additions. */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions. Not NULL if this process has loaded
     thread_db, and it is active. */
  struct thread_db *thread_db;

  /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
  CORE_ADDR r_debug;
};

struct lwp_info;

struct linux_target_ops
{
  /* Architecture-specific setup. */
  void (*arch_setup) (void);

  const struct regs_info *(*regs_info) (void);
  int (*cannot_fetch_register) (int);

  /* Returns 0 if we can store the register, 1 if we can not
     store the register, and 2 if failure to store the register
     is acceptable. */
  int (*cannot_store_register) (int);

  /* Hook to fetch a register in some non-standard way. Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1). Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods. */
  int (*fetch_register) (struct regcache *regcache, int regno);

  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);

  /* See target.h for details. */
  int (*breakpoint_kind_from_pc) (CORE_ADDR *pcptr);

  /* See target.h for details. */
  const gdb_byte *(*sw_breakpoint_from_kind) (int kind, int *size);

  /* Find the next possible PCs after the current instruction executes. */
  std::vector<CORE_ADDR> (*get_next_pcs) (struct regcache *regcache);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions. See target.h for
     comments. */
  int (*supports_z_point_type) (char z_type);
  int (*insert_point) (enum raw_bkpt_type type, CORE_ADDR addr,
		       int size, struct raw_breakpoint *bp);
  int (*remove_point) (enum raw_bkpt_type type, CORE_ADDR addr,
		       int size, struct raw_breakpoint *bp);

  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit). */
  void (*collect_ptrace_register) (struct regcache *regcache,
				   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
				  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF. */
  int (*siginfo_fixup) (siginfo_t *native, gdb_byte *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here. */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a process is being deleted. If extra per-process
     architecture-specific data is needed, delete it here. */
  void (*delete_process) (struct arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here. */
  void (*new_thread) (struct lwp_info *);

  /* Hook to call when a thread is being deleted. If extra per-thread
     architecture-specific data is needed, delete it here. */
  void (*delete_thread) (struct arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached. */
  void (*new_fork) (struct process_info *parent, struct process_info *child);

  /* Hook to call prior to resuming a thread. */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported. */
  void (*process_qsupported) (char **, int count);

  /* Returns true if the low target supports tracepoints. */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
     success, -1 on failure. */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad. See target.h for
     comments. */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					   CORE_ADDR collector,
					   CORE_ADDR lockaddr,
					   ULONGEST orig_size,
					   CORE_ADDR *jump_entry,
					   CORE_ADDR *trampoline,
					   ULONGEST *trampoline_size,
					   unsigned char *jjump_pad_insn,
					   ULONGEST *jjump_pad_insn_size,
					   CORE_ADDR *adjusted_insn_addr,
					   CORE_ADDR *adjusted_insn_addr_end,
					   char *err);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported. */
  struct emit_ops *(*emit_ops) (void);

  /* Return the minimum length of an instruction that can be safely overwritten
     for use as a fast tracepoint. */
  int (*get_min_fast_tracepoint_insn_len) (void);

  /* Returns true if the low target supports range stepping. */
  int (*supports_range_stepping) (void);

  /* See target.h. */
  int (*breakpoint_kind_from_current_state) (CORE_ADDR *pcptr);

  /* See target.h. */
  int (*supports_hardware_single_step) (void);

  /* Fill *SYSNO with the syscall nr trapped. Only to be called when
     inferior is stopped due to SYSCALL_SIGTRAP. */
  void (*get_syscall_trapinfo) (struct regcache *regcache, int *sysno);

  /* See target.h. */
  int (*get_ipa_tdesc_idx) (void);
};

extern struct linux_target_ops the_low_target;
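
/* For illustration only: a minimal sketch of how an architecture
   backend might define the_low_target.  Only the first few hooks are
   shown; the "hyp_" functions are placeholders, and the remaining
   members are zero-initialized.  linux_get_pc_32bit and
   linux_set_pc_32bit are the helpers declared near the end of this
   header.  */
#if 0
struct linux_target_ops the_low_target =
  {
    hyp_arch_setup,		/* arch_setup */
    hyp_regs_info_hook,		/* regs_info */
    hyp_cannot_fetch_register,	/* cannot_fetch_register */
    hyp_cannot_store_register,	/* cannot_store_register */
    NULL,			/* fetch_register */
    linux_get_pc_32bit,		/* get_pc */
    linux_set_pc_32bit,		/* set_pc */
    /* Remaining hooks are left NULL.  */
  };
#endif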

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
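
/* Usage sketch (illustrative only): converting between a thread_info
   and its lwp_info with the accessors above.  The function name is a
   placeholder.  */
#if 0
static void
hyp_check_backlink (struct thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* get_lwp_thread simply follows the backlink, so the round trip
     yields the original thread.  */
  gdb_assert (get_lwp_thread (lwp) == thread);
}
#endif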

/* This struct is recorded in the target_data field of struct thread_info.

   On Linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID. Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID". */

struct lwp_info
{
  /* Backlink to the parent object. */
  struct thread_info *thread;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed). This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet. */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop. */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()). */
  int stopped;

  /* Signals whether we are in a SYSCALL_ENTRY or a SYSCALL_RETURN
     event. Values:
      - TARGET_WAITKIND_SYSCALL_ENTRY
      - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state;

  /* When stopped is set, the last wait status recorded for this lwp. */
  int last_status;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing. This is used to store extended ptrace event
     information or exit status until it can be reported to GDB. */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative. Valid only while
     the parent fork event is not reported to higher layers. Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event. */
  struct lwp_info *fork_relative;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for. If the LWP is
     running, this is the address at which the lwp was resumed. */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported. */
  int status_pending_p;
  int status_pending;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.) */
  enum target_stop_reason stop_reason;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero and
     contains that data address. Only valid if STOPPED_BY_WATCHPOINT
     is true. */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only). */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step. */
  int stepping;

  /* Range to single step within. This is a copy of the step range
     passed along the last resume request. See 'struct
     thread_resume'. */
  CORE_ADDR step_range_start;	/* Inclusive */
  CORE_ADDR step_range_end;	/* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop. */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process. */
  struct pending_signals *pending_signals;

  /* A link used when resuming. It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp. */
  struct thread_resume *resume;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction). Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting. */
  fast_tpoint_collect_result collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB. These were deferred because the thread
     was doing a fast tracepoint collect when they arrived. */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint. This is it. */
  struct breakpoint *exit_jump_pad_bkpt;

#ifdef USE_THREAD_DB
  int thread_known;
  /* The thread handle, used for e.g. TLS access. Only valid if
     THREAD_KNOWN is set. */
  td_thrhandle_t th;

  /* The pthread_t handle. */
  thread_t thread_handle;
#endif

  /* Arch-specific additions. */
  struct arch_lwp_info *arch_private;
};
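
/* Illustrative sketch only: how linux-low.c style code might test
   whether an LWP still has an event to hand to GDB, using the fields
   documented above.  The function name is a placeholder.  */
#if 0
static int
hyp_lwp_has_pending_event (struct lwp_info *lwp)
{
  /* A stop was collected by wait() and not yet reported...  */
  if (lwp->status_pending_p)
    return 1;

  /* ...or an extended event was stashed in WAITSTATUS.  */
  return lwp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
}
#endif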

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID. Returns 0 on success, non-zero otherwise (an
   errno). */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h. */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected. Upon entry,
   both the clone and the parent should be stopped. This function does
   whatever is required to have the clone under thread_db's control. */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;