gdbserver/linux-low: turn watchpoint ops into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
b811d2c2 2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
69f4c9cc
AH
74#ifndef AT_HWCAP2
75#define AT_HWCAP2 26
76#endif
77
db0dfaa0
LM
78/* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81#if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84#if defined(__mcoldfire__)
85/* These are still undefined in 3.10 kernels. */
86#define PT_TEXT_ADDR 49*4
87#define PT_DATA_ADDR 50*4
88#define PT_TEXT_END_ADDR 51*4
89/* BFIN already defines these since at least 2.6.32 kernels. */
90#elif defined(BFIN)
91#define PT_TEXT_ADDR 220
92#define PT_TEXT_END_ADDR 224
93#define PT_DATA_ADDR 228
94/* These are still undefined in 3.10 kernels. */
95#elif defined(__TMS320C6X__)
96#define PT_TEXT_ADDR (0x10000*4)
97#define PT_DATA_ADDR (0x10004*4)
98#define PT_TEXT_END_ADDR (0x10008*4)
99#endif
100#endif
101
5203ae1e
TBA
102#if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107#define SUPPORTS_READ_OFFSETS
108#endif
109
9accd112 110#ifdef HAVE_LINUX_BTRACE
125f8a3d 111# include "nat/linux-btrace.h"
268a13a5 112# include "gdbsupport/btrace-common.h"
9accd112
MM
113#endif
114
8365dcf5
TJB
115#ifndef HAVE_ELF32_AUXV_T
116/* Copied from glibc's elf.h. */
117typedef struct
118{
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We use to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127} Elf32_auxv_t;
128#endif
129
130#ifndef HAVE_ELF64_AUXV_T
131/* Copied from glibc's elf.h. */
132typedef struct
133{
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We use to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142} Elf64_auxv_t;
143#endif
144
ded48a5e
YQ
145/* Does the current host support PTRACE_GETREGSET? */
146int have_ptrace_getregset = -1;
147
cff068da
GB
148/* LWP accessors. */
149
/* See nat/linux-nat.h.  Return the ptid of the thread that owns LWP.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}
157
/* See nat/linux-nat.h.  Store the arch-specific per-LWP data INFO in
   LWP; ownership of INFO passes to the LWP.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
166
/* See nat/linux-nat.h.  Return LWP's arch-specific per-LWP data, or
   NULL if none has been set.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
174
/* See nat/linux-nat.h.  Return non-zero if LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
182
/* See nat/linux-nat.h.  Return the reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
190
0e00e962
AA
/* See nat/linux-nat.h.  Return non-zero if LWP is single-stepping.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}
198
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of not-yet-claimed stopped children.  */
struct simple_pid_list *stopped_pids;
215
216/* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219static void
220add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221{
8d749320 222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228}
229
230static int
231pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232{
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246}
24a09b5f 247
bde24c0a
PA
/* The possible global states of the stop-all-threads machinery.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
262
263/* FIXME make into a target method? */
24a09b5f 264int using_threads = 1;
24a09b5f 265
fa593d66
PA
266/* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268static int stabilizing_threads;
269
f50bf8e5 270static void unsuspend_all_lwps (struct lwp_info *except);
b3312d80 271static struct lwp_info *add_lwp (ptid_t ptid);
95954743 272static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 273static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 274static int finish_step_over (struct lwp_info *lwp);
d50171e4 275static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 276static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 277static int linux_low_ptrace_options (int attached);
ced2dffb 278static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 279
582511be
PA
280/* When the event-loop is doing a step-over, this points at the thread
281 being stepped. */
282ptid_t step_over_bkpt;
283
7d00775e 284/* True if the low target can hardware single-step. */
d50171e4
PA
285
286static int
287can_hardware_single_step (void)
288{
7d00775e
AT
289 if (the_low_target.supports_hardware_single_step != NULL)
290 return the_low_target.supports_hardware_single_step ();
291 else
292 return 0;
293}
294
bf9ae9d8
TBA
295bool
296linux_process_target::low_supports_breakpoints ()
297{
298 return false;
299}
d50171e4 300
bf9ae9d8
TBA
301CORE_ADDR
302linux_process_target::low_get_pc (regcache *regcache)
303{
304 return 0;
305}
306
/* Default implementation: must be overridden by any target that
   supports breakpoints; reaching it is an internal error.  */

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 312
7582c77c
TBA
313std::vector<CORE_ADDR>
314linux_process_target::low_get_next_pcs (regcache *regcache)
315{
316 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
317 "implemented");
318}
319
d4807ea2
TBA
320int
321linux_process_target::low_decr_pc_after_break ()
322{
323 return 0;
324}
325
fa593d66
PA
/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  The capability is keyed off the low target providing a
   jump-pad installer.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
335
c2d6af84
PA
336/* True if LWP is stopped in its stepping range. */
337
338static int
339lwp_in_step_range (struct lwp_info *lwp)
340{
341 CORE_ADDR pc = lwp->stop_pc;
342
343 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
344}
345
0d62e5e8
DJ
/* A signal that was intercepted and must be re-delivered to the
   inferior later (queued via enqueue_pending_signal, declared
   above).  */

struct pending_signals
{
  /* Signal number to re-deliver.  */
  int signal;
  /* Full siginfo captured when the signal was intercepted.  */
  siginfo_t info;
  /* Link to the previously queued entry.  */
  struct pending_signals *prev;
};
611cb4a5 352
bd99dc85
PA
353/* The read/write ends of the pipe registered as waitable file in the
354 event loop. */
355static int linux_event_pipe[2] = { -1, -1 };
356
357/* True if we're currently in async mode. */
358#define target_is_async_p() (linux_event_pipe[0] != -1)
359
02fc4de7 360static void send_sigstop (struct lwp_info *lwp);
bd99dc85 361
d0722149
DE
/* Examine HEADER.  Store the machine type in *MACHINE and return 1 if
   HEADER describes a 64-bit ELF object, 0 if it describes a 32-bit
   (or other-class) ELF object, and -1 (with *MACHINE set to EM_NONE)
   if HEADER does not carry the ELF magic at all.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
379
380/* Return non-zero if FILE is a 64-bit ELF file,
381 zero if the file is not a 64-bit ELF file,
382 and -1 if the file is not accessible or doesn't exist. */
383
be07f1a2 384static int
214d508e 385elf_64_file_p (const char *file, unsigned int *machine)
d0722149 386{
957f3f49 387 Elf64_Ehdr header;
d0722149
DE
388 int fd;
389
390 fd = open (file, O_RDONLY);
391 if (fd < 0)
392 return -1;
393
394 if (read (fd, &header, sizeof (header)) != sizeof (header))
395 {
396 close (fd);
397 return 0;
398 }
399 close (fd);
400
214d508e 401 return elf_64_header_p (&header, machine);
d0722149
DE
402}
403
be07f1a2
PA
404/* Accepts an integer PID; Returns true if the executable PID is
405 running is a 64-bit ELF file.. */
406
407int
214d508e 408linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 409{
d8d2a3ee 410 char file[PATH_MAX];
be07f1a2
PA
411
412 sprintf (file, "/proc/%d/exe", pid);
214d508e 413 return elf_64_file_p (file, machine);
be07f1a2
PA
414}
415
bd99dc85
PA
/* Delete LWP: unregister its thread, release the arch-specific
   per-thread data (through the low target's deleter when provided),
   then free the lwp_info itself.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  /* Remove the thread first; THR is owned by the thread list.  */
  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    /* Targets with no deleter should never have allocated
       arch-private data.  */
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}
433
95954743
PA
/* Add a process to the common process list, and set its private
   data.  ATTACHED is non-zero when we attached to an already-running
   process rather than spawning it.  Gives the low target a chance to
   allocate its arch-specific per-process data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}
450
797bcff5
TBA
451void
452linux_process_target::arch_setup_thread (thread_info *thread)
94585166
DB
453{
454 struct thread_info *saved_thread;
455
456 saved_thread = current_thread;
457 current_thread = thread;
458
797bcff5 459 low_arch_setup ();
94585166
DB
460
461 current_thread = saved_thread;
462}
463
d16f3f6c
TBA
/* Handle a ptrace extended event (fork/vfork/clone/vfork-done/exec)
   reported for *ORIG_EVENT_LWP with raw wait status WSTAT.  Returns 0
   if the event should be reported to the client (for exec,
   *ORIG_EVENT_LWP is updated to the post-exec LWP first), or 1 if the
   event was consumed internally and must not be reported.  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  /* The child gets its own copy of the parent's target
	     description.  */
	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      /* PTRACE_EVENT_CLONE: a new thread in the same process.  */
      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Restore the single-step breakpoints that were uninserted from
	 the parent when the vfork child was spawned.  */
      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
736
df95181f
TBA
/* Return LWP's current PC, or 0 if the target does not support
   breakpoints (low_supports_breakpoints returns false).  Temporarily
   makes LWP's thread the current thread so its register cache can be
   read, restoring the previous current thread before returning.  */

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}
759
82075af2 760/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
4cc32bec 761 Fill *SYSNO with the syscall nr trapped. */
82075af2
JS
762
763static void
4cc32bec 764get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
82075af2
JS
765{
766 struct thread_info *saved_thread;
767 struct regcache *regcache;
768
769 if (the_low_target.get_syscall_trapinfo == NULL)
770 {
771 /* If we cannot get the syscall trapinfo, report an unknown
4cc32bec 772 system call number. */
82075af2 773 *sysno = UNKNOWN_SYSCALL;
82075af2
JS
774 return;
775 }
776
777 saved_thread = current_thread;
778 current_thread = get_lwp_thread (lwp);
779
780 regcache = get_thread_regcache (current_thread, 1);
4cc32bec 781 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
82075af2
JS
782
783 if (debug_threads)
4cc32bec 784 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
785
786 current_thread = saved_thread;
787}
788
df95181f
TBA
/* Classify why LWP stopped and record the result in lwp->stop_reason
   and lwp->stop_pc.  Returns false if the target does not support
   breakpoints, true otherwise.  With USE_SIGTRAP_SIGINFO the
   classification comes from the SIGTRAP si_code; otherwise inserted
   breakpoint locations and the debug registers are inspected
   directly.  When the stop was a software breakpoint and the
   architecture leaves the PC past the breakpoint instruction
   (low_decr_pc_after_break), the PC is rewound to the breakpoint
   address.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}
ce3a066d 923
b3312d80 924static struct lwp_info *
95954743 925add_lwp (ptid_t ptid)
611cb4a5 926{
54a0b537 927 struct lwp_info *lwp;
0d62e5e8 928
8d749320 929 lwp = XCNEW (struct lwp_info);
00db26fa
PA
930
931 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 932
754e3168
AH
933 lwp->thread = add_thread (ptid, lwp);
934
aa5ca48f 935 if (the_low_target.new_thread != NULL)
34c703da 936 the_low_target.new_thread (lwp);
aa5ca48f 937
54a0b537 938 return lwp;
0d62e5e8 939}
611cb4a5 940
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child: marks itself traced, puts itself in a new process
   group, and fixes up stdio when gdbserver is connected over stdio.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
972
da6d8c04 973/* Start an inferior process and returns its pid.
2090129c
SDJ
974 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
975 are its arguments. */
da6d8c04 976
15295543
TBA
977int
978linux_process_target::create_inferior (const char *program,
979 const std::vector<char *> &program_args)
da6d8c04 980{
c12a5089 981 client_state &cs = get_client_state ();
a6dbe5df 982 struct lwp_info *new_lwp;
da6d8c04 983 int pid;
95954743 984 ptid_t ptid;
03583c20 985
41272101
TT
986 {
987 maybe_disable_address_space_randomization restore_personality
c12a5089 988 (cs.disable_randomization);
41272101
TT
989 std::string str_program_args = stringify_argv (program_args);
990
991 pid = fork_inferior (program,
992 str_program_args.c_str (),
993 get_environ ()->envp (), linux_ptrace_fun,
994 NULL, NULL, NULL, NULL);
995 }
03583c20 996
55d7b841 997 linux_add_process (pid, 0);
95954743 998
fd79271b 999 ptid = ptid_t (pid, pid, 0);
95954743 1000 new_lwp = add_lwp (ptid);
a6dbe5df 1001 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 1002
2090129c
SDJ
1003 post_fork_inferior (pid, program);
1004
a9fa9f7d 1005 return pid;
da6d8c04
DJ
1006}
1007
ece66d65
JS
1008/* Implement the post_create_inferior target_ops method. */
1009
6dee9afb
TBA
1010void
1011linux_process_target::post_create_inferior ()
ece66d65
JS
1012{
1013 struct lwp_info *lwp = get_thread_lwp (current_thread);
1014
797bcff5 1015 low_arch_setup ();
ece66d65
JS
1016
1017 if (lwp->must_set_ptrace_flags)
1018 {
1019 struct process_info *proc = current_process ();
1020 int options = linux_low_ptrace_options (proc->attached);
1021
1022 linux_enable_event_reporting (lwpid_of (current_thread), options);
1023 lwp->must_set_ptrace_flags = 0;
1024 }
1025}
1026
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1111
8784d563
PA
1112/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1113 already attached. Returns true if a new LWP is found, false
1114 otherwise. */
1115
1116static int
1117attach_proc_task_lwp_callback (ptid_t ptid)
1118{
1119 /* Is this a new thread? */
1120 if (find_thread_ptid (ptid) == NULL)
1121 {
e38504b3 1122 int lwpid = ptid.lwp ();
8784d563
PA
1123 int err;
1124
1125 if (debug_threads)
1126 debug_printf ("Found new lwp %d\n", lwpid);
1127
1128 err = linux_attach_lwp (ptid);
1129
1130 /* Be quiet if we simply raced with the thread exiting. EPERM
1131 is returned if the thread's task still exists, and is marked
1132 as exited or zombie, as well as other conditions, so in that
1133 case, confirm the status in /proc/PID/status. */
1134 if (err == ESRCH
1135 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1136 {
1137 if (debug_threads)
1138 {
1139 debug_printf ("Cannot attach to lwp %d: "
1140 "thread is gone (%d: %s)\n",
6d91ce9a 1141 lwpid, err, safe_strerror (err));
8784d563
PA
1142 }
1143 }
1144 else if (err != 0)
1145 {
4d9b86e1 1146 std::string reason
50fa3001 1147 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1
SM
1148
1149 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1150 }
1151
1152 return 1;
1153 }
1154 return 0;
1155}
1156
500c1d85
PA
1157static void async_file_mark (void);
1158
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  /* Second argument 1: this process was attached to, not launched.  */
  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting failure.  */
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* Record anything other than the expected initial SIGSTOP, so
	 it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1231
95954743 1232static int
e4eb0dec 1233last_thread_of_process_p (int pid)
95954743 1234{
e4eb0dec 1235 bool seen_one = false;
95954743 1236
da4ae14a 1237 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1238 {
e4eb0dec
SM
1239 if (!seen_one)
1240 {
1241 /* This is the first thread of this process we see. */
1242 seen_one = true;
1243 return false;
1244 }
1245 else
1246 {
1247 /* This is the second thread of this process we see. */
1248 return true;
1249 }
1250 });
da6d8c04 1251
e4eb0dec 1252 return thread == NULL;
95954743
PA
1253}
1254
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno first: the debug-output calls below may clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1298
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1341
578290ec 1342/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1343 except the leader. */
95954743 1344
578290ec
SM
1345static void
1346kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1347{
54a0b537 1348 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1349
fd500816
DJ
1350 /* We avoid killing the first thread here, because of a Linux kernel (at
1351 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1352 the children get a chance to be reaped, it will remain a zombie
1353 forever. */
95954743 1354
d86d4aaf 1355 if (lwpid_of (thread) == pid)
95954743
PA
1356 {
1357 if (debug_threads)
87ce2a04 1358 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1359 target_pid_to_str (thread->id));
578290ec 1360 return;
95954743 1361 }
fd500816 1362
e76126e8 1363 kill_wait_lwp (lwp);
da6d8c04
DJ
1364}
1365
/* Kill every LWP of PROCESS, then mourn it.  Returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1400
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal table when we have it.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      /* Return the host signal number, as expected by PTRACE_DETACH.  */
      return WSTOPSIG (status);
    }
}
1481
/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1566
798a38e8 1567/* Callback for for_each_thread. Detaches from non-leader threads of a
ced2dffb
PA
1568 given process. */
1569
798a38e8
SM
1570static void
1571linux_detach_lwp_callback (thread_info *thread)
ced2dffb 1572{
ced2dffb
PA
1573 /* We don't actually detach from the thread group leader just yet.
1574 If the thread group exits, we must reap the zombie clone lwps
1575 before we're able to reap the leader. */
798a38e8
SM
1576 if (thread->id.pid () == thread->id.lwp ())
1577 return;
ced2dffb 1578
798a38e8 1579 lwp_info *lwp = get_thread_lwp (thread);
ced2dffb 1580 linux_detach_one_lwp (lwp);
6ad8ae5c
DJ
1581}
1582
/* Detach from PROCESS and all of its LWPs.  Returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  /* Now the leader itself.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1621
/* Remove all LWPs that belong to process PROC from the lwp list, and
   free the process's private data.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  /* The low target may own the arch-specific private data; if it
     provides no destructor, there must be nothing to free.  */
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1649
95a49a39
TBA
1650void
1651linux_process_target::join (int pid)
444d6139 1652{
444d6139
PA
1653 int status, ret;
1654
1655 do {
d105de22 1656 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1657 if (WIFEXITED (status) || WIFSIGNALED (status))
1658 break;
1659 } while (ret != -1 || errno != ECHILD);
1660}
1661
13d3d99b
TBA
1662/* Return true if the given thread is still alive. */
1663
1664bool
1665linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1666{
95954743
PA
1667 struct lwp_info *lwp = find_lwp_pid (ptid);
1668
1669 /* We assume we always know if a thread exits. If a whole process
1670 exited but we still haven't been able to report it to GDB, we'll
1671 hold on to the last lwp of the dead process. */
1672 if (lwp != NULL)
00db26fa 1673 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1674 else
1675 return 0;
1676}
1677
/* Return true if THREAD's pending status is still interesting to
   report.  A pending breakpoint stop is discarded (and the thread
   re-resumed) if the breakpoint that caused it has meanwhile been
   removed or the thread has moved past it.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch current_thread; some of the checks below
	 operate on the current thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1741
a681f9c9
PA
1742/* Returns true if LWP is resumed from the client's perspective. */
1743
1744static int
1745lwp_resumed (struct lwp_info *lwp)
1746{
1747 struct thread_info *thread = get_lwp_thread (lwp);
1748
1749 if (thread->last_resume_kind != resume_stop)
1750 return 1;
1751
1752 /* Did gdb send us a `vCont;t', but we haven't reported the
1753 corresponding stop to gdb yet? If so, the thread is still
1754 resumed/running from gdb's perspective. */
1755 if (thread->last_resume_kind == resume_stop
1756 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1757 return 1;
1758
1759 return 0;
1760}
1761
/* Return true if THREAD (matching PTID) has an interesting pending
   status to report.  As a side effect, a stale pending breakpoint
   status causes the thread to be re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending status went stale; resume the thread as it was
	 resumed before, discarding the event.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1785
95954743
PA
1786struct lwp_info *
1787find_lwp_pid (ptid_t ptid)
1788{
da4ae14a 1789 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1790 {
1791 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1792 return thr_arg->id.lwp () == lwp;
454296a2 1793 });
d86d4aaf
DE
1794
1795 if (thread == NULL)
1796 return NULL;
1797
9c80ecd6 1798 return get_thread_lwp (thread);
95954743
PA
1799}
1800
fa96cb38 1801/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1802
fa96cb38
PA
1803static int
1804num_lwps (int pid)
1805{
fa96cb38 1806 int count = 0;
0d62e5e8 1807
4d3bb80e
SM
1808 for_each_thread (pid, [&] (thread_info *thread)
1809 {
9c80ecd6 1810 count++;
4d3bb80e 1811 });
3aee8918 1812
fa96cb38
PA
1813 return count;
1814}
d61ddec4 1815
6d4ee8c6
GB
1816/* See nat/linux-nat.h. */
1817
1818struct lwp_info *
1819iterate_over_lwps (ptid_t filter,
d3a70e03 1820 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1821{
da4ae14a 1822 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1823 {
da4ae14a 1824 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1825
d3a70e03 1826 return callback (lwp);
6d1e5673 1827 });
6d4ee8c6 1828
9c80ecd6 1829 if (thread == NULL)
6d4ee8c6
GB
1830 return NULL;
1831
9c80ecd6 1832 return get_thread_lwp (thread);
6d4ee8c6
GB
1833}
1834
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
  });
}
c3adc08c 1896
a1385b7b
SM
1897/* Callback for `find_thread'. Returns the first LWP that is not
1898 stopped. */
d50171e4 1899
a1385b7b
SM
1900static bool
1901not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1902{
a1385b7b
SM
1903 if (!thread->id.matches (filter))
1904 return false;
47c0c975 1905
a1385b7b 1906 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1907
a1385b7b 1908 return !lwp->stopped;
0d62e5e8 1909}
611cb4a5 1910
863d01bd
PA
1911/* Increment LWP's suspend count. */
1912
1913static void
1914lwp_suspended_inc (struct lwp_info *lwp)
1915{
1916 lwp->suspended++;
1917
1918 if (debug_threads && lwp->suspended > 4)
1919 {
1920 struct thread_info *thread = get_lwp_thread (lwp);
1921
1922 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1923 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1924 }
1925}
1926
1927/* Decrement LWP's suspend count. */
1928
1929static void
1930lwp_suspended_decr (struct lwp_info *lwp)
1931{
1932 lwp->suspended--;
1933
1934 if (lwp->suspended < 0)
1935 {
1936 struct thread_info *thread = get_lwp_thread (lwp);
1937
1938 internal_error (__FILE__, __LINE__,
1939 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1940 lwp->suspended);
1941 }
1942}
1943
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* The caller must deliver us a fully unsuspended LWP.  */
  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* Handle tracepoint-inserted breakpoints (e.g., fast tracepoint
     jump pad related breakpoints).  */
  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the lwp_suspended_inc above.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1993
229d26fc
SM
1994/* Convenience wrapper. Returns information about LWP's fast tracepoint
1995 collection status. */
fa593d66 1996
229d26fc 1997static fast_tpoint_collect_result
fa593d66
PA
1998linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1999 struct fast_tpoint_collect_status *status)
2000{
2001 CORE_ADDR thread_area;
d86d4aaf 2002 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2003
2004 if (the_low_target.get_thread_area == NULL)
229d26fc 2005 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2006
2007 /* Get the thread area address. This is used to recognize which
2008 thread is which when tracing with the in-process agent library.
2009 We don't read anything from the address, and treat it as opaque;
2010 it's the address itself that we assume is unique per-thread. */
d86d4aaf 2011 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
229d26fc 2012 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2013
2014 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2015}
2016
d16f3f6c
TBA
/* Check whether LWP — which has just reported an event with wait
   status *WSTAT, or no status yet if WSTAT is NULL — is stopped
   inside a fast tracepoint jump pad, and if so, arrange for it to be
   moved out before the event is reported to GDB.  Returns true if the
   caller should defer reporting the event and let the LWP finish the
   collection first; false if the event can be reported normally.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* The helpers below operate on current_thread; temporarily switch
     to LWP's thread and restore on exit.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the PC to the tracepoint address so GDB sees
		 the stop at the tracepoint, not inside the pad.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      /* Removing the exit-jump-pad breakpoint must be done
		 with all threads paused.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return false;
}
2140
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  The signal's number is taken from wait status
   *WSTAT; its siginfo is captured from the kernel so it can be
   re-injected faithfully when later reported.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Push the new entry at the head of the list (LIFO).  */
  p_sig = XCNEW (struct pending_signals);
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);

  /* Capture the siginfo for faithful re-injection later.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
2199
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Stores a stop code for the dequeued signal in
   *WSTAT and re-injects its saved siginfo into the kernel.  Returns 1
   if a signal was dequeued, 0 if the list was empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* The list head holds the newest entry; walk to the tail so
	 signals are reported in arrival order.  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      /* si_signo == 0 means no siginfo was captured; don't inject a
	 zeroed one.  */
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
2245
ac1bbaca
TBA
2246bool
2247linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2248{
ac1bbaca
TBA
2249 struct thread_info *saved_thread = current_thread;
2250 current_thread = get_lwp_thread (child);
d50171e4 2251
ac1bbaca
TBA
2252 if (low_stopped_by_watchpoint ())
2253 {
2254 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2255 child->stopped_data_address = low_stopped_data_address ();
2256 }
582511be 2257
ac1bbaca 2258 current_thread = saved_thread;
582511be 2259
ac1bbaca
TBA
2260 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2261}
d50171e4 2262
ac1bbaca
TBA
/* Default implementation of the low_stopped_by_watchpoint hook.
   Architecture backends with hardware watchpoint support override
   this; the base version reports that no watchpoint triggered.  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2268
ac1bbaca
TBA
/* Default implementation of the low_stopped_data_address hook.
   Architecture backends with hardware watchpoint support override
   this; the base version returns no (zero) data address.  Only
   meaningful when low_stopped_by_watchpoint returned true.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2274
de0d863e
DB
2275/* Return the ptrace options that we want to try to enable. */
2276
2277static int
2278linux_low_ptrace_options (int attached)
2279{
c12a5089 2280 client_state &cs = get_client_state ();
de0d863e
DB
2281 int options = 0;
2282
2283 if (!attached)
2284 options |= PTRACE_O_EXITKILL;
2285
c12a5089 2286 if (cs.report_fork_events)
de0d863e
DB
2287 options |= PTRACE_O_TRACEFORK;
2288
c12a5089 2289 if (cs.report_vfork_events)
c269dbdb
DB
2290 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2291
c12a5089 2292 if (cs.report_exec_events)
94585166
DB
2293 options |= PTRACE_O_TRACEEXEC;
2294
82075af2
JS
2295 options |= PTRACE_O_TRACESYSGOOD;
2296
de0d863e
DB
2297 return options;
2298}
2299
d16f3f6c
TBA
/* Do low-level handling of one event just pulled out of waitpid, for
   the LWP with id LWPID and wait status WSTAT.  Records the event in
   the matching lwp_info, handling new/unknown LWPs, thread exits,
   extended ptrace events, syscall stops, and delayed SIGSTOPs.
   Returns the LWP with a status left pending for the core, or NULL if
   the event was filtered out (or belongs to no known LWP).  */

lwp_info *
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_t (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2509
f79b145d
YQ
2510/* Return true if THREAD is doing hardware single step. */
2511
2512static int
2513maybe_hw_step (struct thread_info *thread)
2514{
2515 if (can_hardware_single_step ())
2516 return 1;
2517 else
2518 {
3b9a79ef 2519 /* GDBserver must insert single-step breakpoint for software
f79b145d 2520 single step. */
3b9a79ef 2521 gdb_assert (has_single_step_breakpoints (thread));
f79b145d
YQ
2522 return 0;
2523 }
2524}
2525
df95181f
TBA
2526void
2527linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2528{
20ba1ce6
PA
2529 struct lwp_info *lp = get_thread_lwp (thread);
2530
2531 if (lp->stopped
863d01bd 2532 && !lp->suspended
20ba1ce6 2533 && !lp->status_pending_p
20ba1ce6
PA
2534 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2535 {
8901d193
YQ
2536 int step = 0;
2537
2538 if (thread->last_resume_kind == resume_step)
2539 step = maybe_hw_step (thread);
20ba1ce6
PA
2540
2541 if (debug_threads)
2542 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2543 target_pid_to_str (ptid_of (thread)),
2544 paddress (lp->stop_pc),
2545 step);
2546
df95181f 2547 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2548 }
2549}
2550
d16f3f6c
TBA
/* Wait for an event from any LWP whose id matches FILTER_PTID.
   WAIT_PTID is only used to decide whether any resumed LWPs remain to
   wait for.  The wait status is stored in *WSTATP; OPTIONS are
   waitpid options (pass WNOHANG for a non-blocking check).  Returns
   the LWP id of the thread whose event is reported (that thread is
   also made current), -1 if no unwaited-for resumed children remain,
   or 0 if WNOHANG was set and no event was found.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Waiting on "any LWP" or a whole process: pick a random thread
	 with a pending status, to avoid starvation.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting on one specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);

      /* If the LWP was mid fast-tracepoint collection, defer its
	 pending signal and set it running again so it can finish the
	 collection first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      /* sigsuspend atomically unblocks signals and waits; SIGCHLD
	 delivery wakes us up, then we re-block and retry from the
	 top so pending statuses are re-checked.  */
      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}
2752
d16f3f6c
TBA
/* Wait for an event from any LWP matching PTID.  Convenience wrapper
   around wait_for_event_filtered, using PTID both as the wait set and
   as the filter set.  See that function for the meaning of WSTATP,
   OPTIONS and the return value.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2758
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP is updated to point at the chosen LWP; it is left
   untouched if no thread with a pending event is found.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2815
7984d532
PA
2816/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2817 NULL. */
2818
2819static void
2820unsuspend_all_lwps (struct lwp_info *except)
2821{
139720c5
SM
2822 for_each_thread ([&] (thread_info *thread)
2823 {
2824 lwp_info *lwp = get_thread_lwp (thread);
2825
2826 if (lwp != except)
2827 lwp_suspended_decr (lwp);
2828 });
7984d532
PA
2829}
2830
fcb056a5 2831static bool stuck_in_jump_pad_callback (thread_info *thread);
5a6b0a41 2832static bool lwp_running (thread_info *thread);
fa593d66
PA
2833
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already wedged in a jump pad, we cannot
     stabilize; bail out early and report it.  */
  thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below clobbers the global current_thread; remember it so
     it can be restored once stabilization is done.  */
  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until unsuspend_all_lwps below.  */
	  lwp_suspended_inc (lwp);

	  /* A real signal (or an explicit stop request) must not be
	     lost while we keep looping; queue it for later
	     delivery.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the lwp_suspended_inc done on each stopped LWP above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Sanity check: nobody should be stuck in a jump pad at this
	 point.  */
      thread_stuck = find_thread (stuck_in_jump_pad_callback);

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2930
582511be
PA
2931/* Convenience function that is called when the kernel reports an
2932 event that is not passed out to GDB. */
2933
2934static ptid_t
2935ignore_event (struct target_waitstatus *ourstatus)
2936{
2937 /* If we got an event, there may still be others, as a single
2938 SIGCHLD can indicate more than one child stopped. This forces
2939 another target_wait call. */
2940 async_file_mark ();
2941
2942 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2943 return null_ptid;
2944}
2945
65706a29
PA
2946/* Convenience function that is called when the kernel reports an exit
2947 event. This decides whether to report the event to GDB as a
2948 process exit event, a thread exit event, or to suppress the
2949 event. */
2950
2951static ptid_t
2952filter_exit_event (struct lwp_info *event_child,
2953 struct target_waitstatus *ourstatus)
2954{
c12a5089 2955 client_state &cs = get_client_state ();
65706a29
PA
2956 struct thread_info *thread = get_lwp_thread (event_child);
2957 ptid_t ptid = ptid_of (thread);
2958
2959 if (!last_thread_of_process_p (pid_of (thread)))
2960 {
c12a5089 2961 if (cs.report_thread_events)
65706a29
PA
2962 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2963 else
2964 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2965
2966 delete_lwp (event_child);
2967 }
2968 return ptid;
2969}
2970
82075af2
JS
2971/* Returns 1 if GDB is interested in any event_child syscalls. */
2972
2973static int
2974gdb_catching_syscalls_p (struct lwp_info *event_child)
2975{
2976 struct thread_info *thread = get_lwp_thread (event_child);
2977 struct process_info *proc = get_thread_process (thread);
2978
f27866ba 2979 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2980}
2981
2982/* Returns 1 if GDB is interested in the event_child syscall.
2983 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
2984
2985static int
2986gdb_catch_this_syscall_p (struct lwp_info *event_child)
2987{
4cc32bec 2988 int sysno;
82075af2
JS
2989 struct thread_info *thread = get_lwp_thread (event_child);
2990 struct process_info *proc = get_thread_process (thread);
2991
f27866ba 2992 if (proc->syscalls_to_catch.empty ())
82075af2
JS
2993 return 0;
2994
f27866ba 2995 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
82075af2
JS
2996 return 1;
2997
4cc32bec 2998 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2999
3000 for (int iter : proc->syscalls_to_catch)
82075af2
JS
3001 if (iter == sysno)
3002 return 1;
3003
3004 return 0;
3005}
3006
/* Core of the target wait implementation.  Waits for an event from
   PTID (or, when a step-over is in flight, from the stepping LWP),
   handles/filters events GDB should not see (internal breakpoints,
   fast tracepoint collection, ignored signals, uninteresting
   syscalls), and otherwise fills OURSTATUS and returns the ptid of
   the event thread.  Returns null_ptid with TARGET_WAITKIND_IGNORE
   or TARGET_WAITKIND_NO_RESUMED when there is nothing to report.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      int target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  Either a pending status to report,
     or a thread that is not stopped, counts as "resumed".  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      /* A step-over is in progress; wait (blocking) only for the
	 stepping LWP so no other thread moves meanwhile.  */
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  /* If past the breakpoint there is no breakpoint instruction
	     anymore, the stop no longer has a reason.  */
	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      /* Re-evaluate where the LWP is within the collection.  */
      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  /* During stabilization, a clean stop is reported
		     as SIG0 so stabilize_threads can lock the LWP.  */
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall_p (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Fetch the siginfo so it can be re-delivered with the
	 signal; fall back to no info if ptrace fails.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'd be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3739
3740/* Get rid of any pending event in the pipe. */
3741static void
3742async_file_flush (void)
3743{
3744 int ret;
3745 char buf;
3746
3747 do
3748 ret = read (linux_event_pipe[0], &buf, 1);
3749 while (ret >= 0 || (ret == -1 && errno == EINTR));
3750}
3751
3752/* Put something in the pipe, so the event loop wakes up. */
3753static void
3754async_file_mark (void)
3755{
3756 int ret;
3757
3758 async_file_flush ();
3759
3760 do
3761 ret = write (linux_event_pipe[1], "+", 1);
3762 while (ret == 0 || (ret == -1 && errno == EINTR));
3763
3764 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3765 be awakened anyway. */
3766}
3767
6532e7e3
TBA
3768ptid_t
3769linux_process_target::wait (ptid_t ptid,
3770 target_waitstatus *ourstatus,
3771 int target_options)
bd99dc85 3772{
95954743 3773 ptid_t event_ptid;
bd99dc85 3774
bd99dc85
PA
3775 /* Flush the async file first. */
3776 if (target_is_async_p ())
3777 async_file_flush ();
3778
582511be
PA
3779 do
3780 {
d16f3f6c 3781 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3782 }
3783 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3784 && event_ptid == null_ptid
582511be 3785 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3786
3787 /* If at least one stop was reported, there may be more. A single
3788 SIGCHLD can signal more than one child stop. */
3789 if (target_is_async_p ()
3790 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3791 && event_ptid != null_ptid)
bd99dc85
PA
3792 async_file_mark ();
3793
3794 return event_ptid;
da6d8c04
DJ
3795}
3796
c5f62d5f 3797/* Send a signal to an LWP. */
fd500816
DJ
3798
3799static int
a1928bad 3800kill_lwp (unsigned long lwpid, int signo)
fd500816 3801{
4a6ed09b 3802 int ret;
fd500816 3803
4a6ed09b
PA
3804 errno = 0;
3805 ret = syscall (__NR_tkill, lwpid, signo);
3806 if (errno == ENOSYS)
3807 {
3808 /* If tkill fails, then we are not using nptl threads, a
3809 configuration we no longer support. */
3810 perror_with_name (("tkill"));
3811 }
3812 return ret;
fd500816
DJ
3813}
3814
/* Exported entry point: stop LWP by sending it a SIGSTOP (see
   send_sigstop for the pending-stop bookkeeping).  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3820
0d62e5e8 3821static void
02fc4de7 3822send_sigstop (struct lwp_info *lwp)
0d62e5e8 3823{
bd99dc85 3824 int pid;
0d62e5e8 3825
d86d4aaf 3826 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3827
0d62e5e8
DJ
3828 /* If we already have a pending stop signal for this process, don't
3829 send another. */
54a0b537 3830 if (lwp->stop_expected)
0d62e5e8 3831 {
ae13219e 3832 if (debug_threads)
87ce2a04 3833 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3834
0d62e5e8
DJ
3835 return;
3836 }
3837
3838 if (debug_threads)
87ce2a04 3839 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3840
d50171e4 3841 lwp->stop_expected = 1;
bd99dc85 3842 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3843}
3844
df3e4dbe
SM
3845static void
3846send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3847{
d86d4aaf 3848 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3849
7984d532
PA
3850 /* Ignore EXCEPT. */
3851 if (lwp == except)
df3e4dbe 3852 return;
7984d532 3853
02fc4de7 3854 if (lwp->stopped)
df3e4dbe 3855 return;
02fc4de7
PA
3856
3857 send_sigstop (lwp);
7984d532
PA
3858}
3859
3860/* Increment the suspend count of an LWP, and stop it, if not stopped
3861 yet. */
df3e4dbe
SM
3862static void
3863suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3864{
d86d4aaf 3865 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3866
3867 /* Ignore EXCEPT. */
3868 if (lwp == except)
df3e4dbe 3869 return;
7984d532 3870
863d01bd 3871 lwp_suspended_inc (lwp);
7984d532 3872
df3e4dbe 3873 send_sigstop (thread, except);
02fc4de7
PA
3874}
3875
95954743
PA
3876static void
3877mark_lwp_dead (struct lwp_info *lwp, int wstat)
3878{
95954743
PA
3879 /* Store the exit status for later. */
3880 lwp->status_pending_p = 1;
3881 lwp->status_pending = wstat;
3882
00db26fa
PA
3883 /* Store in waitstatus as well, as there's nothing else to process
3884 for this event. */
3885 if (WIFEXITED (wstat))
3886 {
3887 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3888 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3889 }
3890 else if (WIFSIGNALED (wstat))
3891 {
3892 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3893 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3894 }
3895
95954743
PA
3896 /* Prevent trying to stop it. */
3897 lwp->stopped = 1;
3898
3899 /* No further stops are expected from a dead lwp. */
3900 lwp->stop_expected = 0;
3901}
3902
00db26fa
PA
3903/* Return true if LWP has exited already, and has a pending exit event
3904 to report to GDB. */
3905
3906static int
3907lwp_is_marked_dead (struct lwp_info *lwp)
3908{
3909 return (lwp->status_pending_p
3910 && (WIFEXITED (lwp->status_pending)
3911 || WIFSIGNALED (lwp->status_pending)));
3912}
3913
d16f3f6c
TBA
3914void
3915linux_process_target::wait_for_sigstop ()
0d62e5e8 3916{
0bfdf32f 3917 struct thread_info *saved_thread;
95954743 3918 ptid_t saved_tid;
fa96cb38
PA
3919 int wstat;
3920 int ret;
0d62e5e8 3921
0bfdf32f
GB
3922 saved_thread = current_thread;
3923 if (saved_thread != NULL)
9c80ecd6 3924 saved_tid = saved_thread->id;
bd99dc85 3925 else
95954743 3926 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3927
d50171e4 3928 if (debug_threads)
fa96cb38 3929 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3930
fa96cb38
PA
3931 /* Passing NULL_PTID as filter indicates we want all events to be
3932 left pending. Eventually this returns when there are no
3933 unwaited-for children left. */
d16f3f6c 3934 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3935 gdb_assert (ret == -1);
0d62e5e8 3936
13d3d99b 3937 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 3938 current_thread = saved_thread;
0d62e5e8
DJ
3939 else
3940 {
3941 if (debug_threads)
87ce2a04 3942 debug_printf ("Previously current thread died.\n");
0d62e5e8 3943
f0db101d
PA
3944 /* We can't change the current inferior behind GDB's back,
3945 otherwise, a subsequent command may apply to the wrong
3946 process. */
3947 current_thread = NULL;
0d62e5e8
DJ
3948 }
3949}
3950
fcb056a5 3951/* Returns true if THREAD is stopped in a jump pad, and we can't
fa593d66
PA
3952 move it out, because we need to report the stop event to GDB. For
3953 example, if the user puts a breakpoint in the jump pad, it's
3954 because she wants to debug it. */
3955
fcb056a5
SM
3956static bool
3957stuck_in_jump_pad_callback (thread_info *thread)
fa593d66 3958{
d86d4aaf 3959 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3960
863d01bd
PA
3961 if (lwp->suspended != 0)
3962 {
3963 internal_error (__FILE__, __LINE__,
3964 "LWP %ld is suspended, suspended=%d\n",
3965 lwpid_of (thread), lwp->suspended);
3966 }
fa593d66
PA
3967 gdb_assert (lwp->stopped);
3968
3969 /* Allow debugging the jump pad, gdb_collect, etc.. */
3970 return (supports_fast_tracepoints ()
58b4daa5 3971 && agent_loaded_p ()
fa593d66 3972 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3973 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3974 || thread->last_resume_kind == resume_step)
229d26fc
SM
3975 && (linux_fast_tracepoint_collecting (lwp, NULL)
3976 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3977}
3978
/* If THREAD's LWP is stopped inside a fast tracepoint jump pad and
   GDB does not need to see this stop (no GDB breakpoint here, not a
   watchpoint stop, not single-stepping), arrange to move it out by
   resuming it; any pending signal is deferred for later delivery.
   Otherwise, bump its suspend count so it stays put.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  /* Callers must not hand us a suspended LWP.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Point at the pending wait status, if any.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Defer the pending signal; it will be re-delivered once
	     the LWP is out of the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
4028
5a6b0a41
SM
4029static bool
4030lwp_running (thread_info *thread)
fa593d66 4031{
d86d4aaf 4032 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4033
00db26fa 4034 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4035 return false;
4036
4037 return !lwp->stopped;
fa593d66
PA
4038}
4039
d16f3f6c
TBA
4040void
4041linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 4042{
bde24c0a
PA
4043 /* Should not be called recursively. */
4044 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4045
87ce2a04
DE
4046 if (debug_threads)
4047 {
4048 debug_enter ();
4049 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4050 suspend ? "stop-and-suspend" : "stop",
4051 except != NULL
d86d4aaf 4052 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
4053 : "none");
4054 }
4055
bde24c0a
PA
4056 stopping_threads = (suspend
4057 ? STOPPING_AND_SUSPENDING_THREADS
4058 : STOPPING_THREADS);
7984d532
PA
4059
4060 if (suspend)
df3e4dbe
SM
4061 for_each_thread ([&] (thread_info *thread)
4062 {
4063 suspend_and_send_sigstop (thread, except);
4064 });
7984d532 4065 else
df3e4dbe
SM
4066 for_each_thread ([&] (thread_info *thread)
4067 {
4068 send_sigstop (thread, except);
4069 });
4070
fa96cb38 4071 wait_for_sigstop ();
bde24c0a 4072 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4073
4074 if (debug_threads)
4075 {
4076 debug_printf ("stop_all_lwps done, setting stopping_threads "
4077 "back to !stopping\n");
4078 debug_exit ();
4079 }
0d62e5e8
DJ
4080}
4081
863d01bd
PA
4082/* Enqueue one signal in the chain of signals which need to be
4083 delivered to this process on next resume. */
4084
4085static void
4086enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4087{
8d749320 4088 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4089
863d01bd
PA
4090 p_sig->prev = lwp->pending_signals;
4091 p_sig->signal = signal;
4092 if (info == NULL)
4093 memset (&p_sig->info, 0, sizeof (siginfo_t));
4094 else
4095 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4096 lwp->pending_signals = p_sig;
4097}
4098
df95181f
TBA
4099void
4100linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4101{
984a2c04
YQ
4102 struct thread_info *thread = get_lwp_thread (lwp);
4103 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4104
4105 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4106
984a2c04 4107 current_thread = thread;
7582c77c 4108 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 4109
a0ff9e1a 4110 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4111 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4112}
4113
df95181f
TBA
4114int
4115linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4116{
4117 int step = 0;
4118
4119 if (can_hardware_single_step ())
4120 {
4121 step = 1;
4122 }
7582c77c 4123 else if (supports_software_single_step ())
7fe5e27e
AT
4124 {
4125 install_software_single_step_breakpoints (lwp);
4126 step = 0;
4127 }
4128 else
4129 {
4130 if (debug_threads)
4131 debug_printf ("stepping is not implemented on this target");
4132 }
4133
4134 return step;
4135}
4136
35ac8b3e 4137/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4138 finish a fast tracepoint collect. Since signal can be delivered in
4139 the step-over, the program may go to signal handler and trap again
4140 after return from the signal handler. We can live with the spurious
4141 double traps. */
35ac8b3e
YQ
4142
4143static int
4144lwp_signal_can_be_delivered (struct lwp_info *lwp)
4145{
229d26fc
SM
4146 return (lwp->collecting_fast_tracepoint
4147 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4148}
4149
/* Resume execution of LWP.  If STEP is nonzero, single-step it; if
   SIGNAL is nonzero, deliver that signal on resume (INFO, when
   non-NULL, is the accompanying siginfo).  ptrace failures are raised
   via perror_with_name; see resume_one_lwp for the wrapper that
   handles the LWP-disappeared case.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      /* Record the PC we are resuming from, for stop-event analysis.  */
      lwp->stop_pc = low_get_pc (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest entry (tail of the prev-linked list), so
	 signals are delivered in the order they were queued.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  /* Give the low target a chance to update debug registers etc.
     before the resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4355
4356/* Called when we try to resume a stopped LWP and that errors out. If
4357 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4358 or about to become), discard the error, clear any pending status
4359 the LWP may have, and return true (we'll collect the exit status
4360 soon enough). Otherwise, return false. */
4361
4362static int
4363check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4364{
4365 struct thread_info *thread = get_lwp_thread (lp);
4366
4367 /* If we get an error after resuming the LWP successfully, we'd
4368 confuse !T state for the LWP being gone. */
4369 gdb_assert (lp->stopped);
4370
4371 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4372 because even if ptrace failed with ESRCH, the tracee may be "not
4373 yet fully dead", but already refusing ptrace requests. In that
4374 case the tracee has 'R (Running)' state for a little bit
4375 (observed in Linux 3.18). See also the note on ESRCH in the
4376 ptrace(2) man page. Instead, check whether the LWP has any state
4377 other than ptrace-stopped. */
4378
4379 /* Don't assume anything if /proc/PID/status can't be read. */
4380 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4381 {
23f238d3
PA
4382 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4383 lp->status_pending_p = 0;
4384 return 1;
4385 }
4386 return 0;
4387}
4388
df95181f
TBA
4389void
4390linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4391 siginfo_t *info)
23f238d3 4392{
a70b8144 4393 try
23f238d3 4394 {
df95181f 4395 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4396 }
230d2906 4397 catch (const gdb_exception_error &ex)
23f238d3
PA
4398 {
4399 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4400 throw;
3221518c 4401 }
da6d8c04
DJ
4402}
4403
5fdda392
SM
4404/* This function is called once per thread via for_each_thread.
4405 We look up which resume request applies to THREAD and mark it with a
4406 pointer to the appropriate resume request.
5544ad89
DJ
4407
4408 This algorithm is O(threads * resume elements), but resume elements
4409 is small (and will remain small at least until GDB supports thread
4410 suspension). */
ebcf782c 4411
5fdda392
SM
4412static void
4413linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4414{
d86d4aaf 4415 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4416
5fdda392 4417 for (int ndx = 0; ndx < n; ndx++)
95954743 4418 {
5fdda392 4419 ptid_t ptid = resume[ndx].thread;
d7e15655 4420 if (ptid == minus_one_ptid
9c80ecd6 4421 || ptid == thread->id
0c9070b3
YQ
4422 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4423 of PID'. */
e99b03dc 4424 || (ptid.pid () == pid_of (thread)
0e998d96 4425 && (ptid.is_pid ()
e38504b3 4426 || ptid.lwp () == -1)))
95954743 4427 {
5fdda392 4428 if (resume[ndx].kind == resume_stop
8336d594 4429 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4430 {
4431 if (debug_threads)
87ce2a04
DE
4432 debug_printf ("already %s LWP %ld at GDB's request\n",
4433 (thread->last_status.kind
4434 == TARGET_WAITKIND_STOPPED)
4435 ? "stopped"
4436 : "stopping",
d86d4aaf 4437 lwpid_of (thread));
d50171e4
PA
4438
4439 continue;
4440 }
4441
5a04c4cf
PA
4442 /* Ignore (wildcard) resume requests for already-resumed
4443 threads. */
5fdda392 4444 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4445 && thread->last_resume_kind != resume_stop)
4446 {
4447 if (debug_threads)
4448 debug_printf ("already %s LWP %ld at GDB's request\n",
4449 (thread->last_resume_kind
4450 == resume_step)
4451 ? "stepping"
4452 : "continuing",
4453 lwpid_of (thread));
4454 continue;
4455 }
4456
4457 /* Don't let wildcard resumes resume fork children that GDB
4458 does not yet know are new fork children. */
4459 if (lwp->fork_relative != NULL)
4460 {
5a04c4cf
PA
4461 struct lwp_info *rel = lwp->fork_relative;
4462
4463 if (rel->status_pending_p
4464 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4465 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4466 {
4467 if (debug_threads)
4468 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4469 lwpid_of (thread));
4470 continue;
4471 }
4472 }
4473
4474 /* If the thread has a pending event that has already been
4475 reported to GDBserver core, but GDB has not pulled the
4476 event out of the vStopped queue yet, likewise, ignore the
4477 (wildcard) resume request. */
9c80ecd6 4478 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4479 {
4480 if (debug_threads)
4481 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4482 lwpid_of (thread));
4483 continue;
4484 }
4485
5fdda392 4486 lwp->resume = &resume[ndx];
8336d594 4487 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4488
c2d6af84
PA
4489 lwp->step_range_start = lwp->resume->step_range_start;
4490 lwp->step_range_end = lwp->resume->step_range_end;
4491
fa593d66
PA
4492 /* If we had a deferred signal to report, dequeue one now.
4493 This can happen if LWP gets more than one signal while
4494 trying to get out of a jump pad. */
4495 if (lwp->stopped
4496 && !lwp->status_pending_p
4497 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4498 {
4499 lwp->status_pending_p = 1;
4500
4501 if (debug_threads)
87ce2a04
DE
4502 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4503 "leaving status pending.\n",
d86d4aaf
DE
4504 WSTOPSIG (lwp->status_pending),
4505 lwpid_of (thread));
fa593d66
PA
4506 }
4507
5fdda392 4508 return;
95954743
PA
4509 }
4510 }
2bd7c093
PA
4511
4512 /* No resume action for this thread. */
4513 lwp->resume = NULL;
5544ad89
DJ
4514}
4515
df95181f
TBA
4516bool
4517linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4518{
d86d4aaf 4519 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4520
bd99dc85
PA
4521 /* LWPs which will not be resumed are not interesting, because
4522 we might not wait for them next time through linux_wait. */
2bd7c093 4523 if (lwp->resume == NULL)
25c28b4d 4524 return false;
64386c31 4525
df95181f 4526 return thread_still_has_status_pending (thread);
d50171e4
PA
4527}
4528
/* Return true if THREAD is stopped on a breakpoint or fast-tracepoint
   jump that GDBserver itself must step over before resuming it;
   return false if it can simply be resumed, or if GDB is expected to
   handle the stop.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  /* GDB asked this thread to stay stopped; no resume, no step-over.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* Breakpoint lookups below consult the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4659
d16f3f6c
TBA
4660void
4661linux_process_target::start_step_over (lwp_info *lwp)
d50171e4 4662{
d86d4aaf 4663 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4664 struct thread_info *saved_thread;
d50171e4
PA
4665 CORE_ADDR pc;
4666 int step;
4667
4668 if (debug_threads)
87ce2a04 4669 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
d86d4aaf 4670 lwpid_of (thread));
d50171e4 4671
7984d532 4672 stop_all_lwps (1, lwp);
863d01bd
PA
4673
4674 if (lwp->suspended != 0)
4675 {
4676 internal_error (__FILE__, __LINE__,
4677 "LWP %ld suspended=%d\n", lwpid_of (thread),
4678 lwp->suspended);
4679 }
d50171e4
PA
4680
4681 if (debug_threads)
87ce2a04 4682 debug_printf ("Done stopping all threads for step-over.\n");
d50171e4
PA
4683
4684 /* Note, we should always reach here with an already adjusted PC,
4685 either by GDB (if we're resuming due to GDB's request), or by our
4686 caller, if we just finished handling an internal breakpoint GDB
4687 shouldn't care about. */
4688 pc = get_pc (lwp);
4689
0bfdf32f
GB
4690 saved_thread = current_thread;
4691 current_thread = thread;
d50171e4
PA
4692
4693 lwp->bp_reinsert = pc;
4694 uninsert_breakpoints_at (pc);
fa593d66 4695 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4 4696
7fe5e27e 4697 step = single_step (lwp);
d50171e4 4698
0bfdf32f 4699 current_thread = saved_thread;
d50171e4 4700
df95181f 4701 resume_one_lwp (lwp, step, 0, NULL);
d50171e4
PA
4702
4703 /* Require next event from this LWP. */
9c80ecd6 4704 step_over_bkpt = thread->id;
d50171e4
PA
4705}
4706
4707/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3b9a79ef 4708 start_step_over, if still there, and delete any single-step
d50171e4
PA
4709 breakpoints we've set, on non hardware single-step targets. */
4710
4711static int
4712finish_step_over (struct lwp_info *lwp)
4713{
4714 if (lwp->bp_reinsert != 0)
4715 {
f79b145d
YQ
4716 struct thread_info *saved_thread = current_thread;
4717
d50171e4 4718 if (debug_threads)
87ce2a04 4719 debug_printf ("Finished step over.\n");
d50171e4 4720
f79b145d
YQ
4721 current_thread = get_lwp_thread (lwp);
4722
d50171e4
PA
4723 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4724 may be no breakpoint to reinsert there by now. */
4725 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4726 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4727
4728 lwp->bp_reinsert = 0;
4729
3b9a79ef
YQ
4730 /* Delete any single-step breakpoints. No longer needed. We
4731 don't have to worry about other threads hitting this trap,
4732 and later not being able to explain it, because we were
4733 stepping over a breakpoint, and we hold all threads but
4734 LWP stopped while doing that. */
d50171e4 4735 if (!can_hardware_single_step ())
f79b145d 4736 {
3b9a79ef
YQ
4737 gdb_assert (has_single_step_breakpoints (current_thread));
4738 delete_single_step_breakpoints (current_thread);
f79b145d 4739 }
d50171e4
PA
4740
4741 step_over_bkpt = null_ptid;
f79b145d 4742 current_thread = saved_thread;
d50171e4
PA
4743 return 1;
4744 }
4745 else
4746 return 0;
4747}
4748
d16f3f6c
TBA
4749void
4750linux_process_target::complete_ongoing_step_over ()
863d01bd 4751{
d7e15655 4752 if (step_over_bkpt != null_ptid)
863d01bd
PA
4753 {
4754 struct lwp_info *lwp;
4755 int wstat;
4756 int ret;
4757
4758 if (debug_threads)
4759 debug_printf ("detach: step over in progress, finish it first\n");
4760
4761 /* Passing NULL_PTID as filter indicates we want all events to
4762 be left pending. Eventually this returns when there are no
4763 unwaited-for children left. */
d16f3f6c
TBA
4764 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4765 __WALL);
863d01bd
PA
4766 gdb_assert (ret == -1);
4767
4768 lwp = find_lwp_pid (step_over_bkpt);
4769 if (lwp != NULL)
4770 finish_step_over (lwp);
4771 step_over_bkpt = null_ptid;
4772 unsuspend_all_lwps (lwp);
4773 }
4774}
4775
df95181f
TBA
4776void
4777linux_process_target::resume_one_thread (thread_info *thread,
4778 bool leave_all_stopped)
5544ad89 4779{
d86d4aaf 4780 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4781 int leave_pending;
5544ad89 4782
2bd7c093 4783 if (lwp->resume == NULL)
c80825ff 4784 return;
5544ad89 4785
bd99dc85 4786 if (lwp->resume->kind == resume_stop)
5544ad89 4787 {
bd99dc85 4788 if (debug_threads)
d86d4aaf 4789 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
bd99dc85
PA
4790
4791 if (!lwp->stopped)
4792 {
4793 if (debug_threads)
d86d4aaf 4794 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
bd99dc85 4795
d50171e4
PA
4796 /* Stop the thread, and wait for the event asynchronously,
4797 through the event loop. */
02fc4de7 4798 send_sigstop (lwp);
bd99dc85
PA
4799 }
4800 else
4801 {
4802 if (debug_threads)
87ce2a04 4803 debug_printf ("already stopped LWP %ld\n",
d86d4aaf 4804 lwpid_of (thread));
d50171e4
PA
4805
4806 /* The LWP may have been stopped in an internal event that
4807 was not meant to be notified back to GDB (e.g., gdbserver
4808 breakpoint), so we should be reporting a stop event in
4809 this case too. */
4810
4811 /* If the thread already has a pending SIGSTOP, this is a
4812 no-op. Otherwise, something later will presumably resume
4813 the thread and this will cause it to cancel any pending
4814 operation, due to last_resume_kind == resume_stop. If
4815 the thread already has a pending status to report, we
4816 will still report it the next time we wait - see
4817 status_pending_p_callback. */
1a981360
PA
4818
4819 /* If we already have a pending signal to report, then
4820 there's no need to queue a SIGSTOP, as this means we're
4821 midway through moving the LWP out of the jumppad, and we
4822 will report the pending signal as soon as that is
4823 finished. */
4824 if (lwp->pending_signals_to_report == NULL)
4825 send_sigstop (lwp);
bd99dc85 4826 }
32ca6d61 4827
bd99dc85
PA
4828 /* For stop requests, we're done. */
4829 lwp->resume = NULL;
fc7238bb 4830 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
c80825ff 4831 return;
5544ad89
DJ
4832 }
4833
bd99dc85 4834 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4835 then don't resume it - we can just report the pending status.
4836 Likewise if it is suspended, because e.g., another thread is
4837 stepping past a breakpoint. Make sure to queue any signals that
4838 would otherwise be sent. In all-stop mode, we do this decision
4839 based on if *any* thread has a pending status. If there's a
4840 thread that needs the step-over-breakpoint dance, then don't
4841 resume any other thread but that particular one. */
4842 leave_pending = (lwp->suspended
4843 || lwp->status_pending_p
4844 || leave_all_stopped);
5544ad89 4845
0e9a339e
YQ
4846 /* If we have a new signal, enqueue the signal. */
4847 if (lwp->resume->sig != 0)
4848 {
4849 siginfo_t info, *info_p;
4850
4851 /* If this is the same signal we were previously stopped by,
4852 make sure to queue its siginfo. */
4853 if (WIFSTOPPED (lwp->last_status)
4854 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4855 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4856 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4857 info_p = &info;
4858 else
4859 info_p = NULL;
4860
4861 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4862 }
4863
d50171e4 4864 if (!leave_pending)
bd99dc85
PA
4865 {
4866 if (debug_threads)
d86d4aaf 4867 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5544ad89 4868
9c80ecd6 4869 proceed_one_lwp (thread, NULL);
bd99dc85
PA
4870 }
4871 else
4872 {
4873 if (debug_threads)
d86d4aaf 4874 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
bd99dc85 4875 }
5544ad89 4876
fc7238bb 4877 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 4878 lwp->resume = NULL;
0d62e5e8
DJ
4879}
4880
0e4d7e35
TBA
4881void
4882linux_process_target::resume (thread_resume *resume_info, size_t n)
0d62e5e8 4883{
d86d4aaf 4884 struct thread_info *need_step_over = NULL;
c6ecbae5 4885
87ce2a04
DE
4886 if (debug_threads)
4887 {
4888 debug_enter ();
4889 debug_printf ("linux_resume:\n");
4890 }
4891
5fdda392
SM
4892 for_each_thread ([&] (thread_info *thread)
4893 {
4894 linux_set_resume_request (thread, resume_info, n);
4895 });
5544ad89 4896
d50171e4
PA
4897 /* If there is a thread which would otherwise be resumed, which has
4898 a pending status, then don't resume any threads - we can just
4899 report the pending status. Make sure to queue any signals that
4900 would otherwise be sent. In non-stop mode, we'll apply this
4901 logic to each thread individually. We consume all pending events
4902 before considering to start a step-over (in all-stop). */
25c28b4d 4903 bool any_pending = false;
bd99dc85 4904 if (!non_stop)
df95181f
TBA
4905 any_pending = find_thread ([this] (thread_info *thread)
4906 {
4907 return resume_status_pending (thread);
4908 }) != nullptr;
d50171e4
PA
4909
4910 /* If there is a thread which would otherwise be resumed, which is
4911 stopped at a breakpoint that needs stepping over, then don't
4912 resume any threads - have it step over the breakpoint with all
4913 other threads stopped, then resume all threads again. Make sure
4914 to queue any signals that would otherwise be delivered or
4915 queued. */
bf9ae9d8 4916 if (!any_pending && low_supports_breakpoints ())
df95181f
TBA
4917 need_step_over = find_thread ([this] (thread_info *thread)
4918 {
4919 return thread_needs_step_over (thread);
4920 });
d50171e4 4921
c80825ff 4922 bool leave_all_stopped = (need_step_over != NULL || any_pending);
d50171e4
PA
4923
4924 if (debug_threads)
4925 {
4926 if (need_step_over != NULL)
87ce2a04 4927 debug_printf ("Not resuming all, need step over\n");
d50171e4 4928 else if (any_pending)
87ce2a04
DE
4929 debug_printf ("Not resuming, all-stop and found "
4930 "an LWP with pending status\n");
d50171e4 4931 else
87ce2a04 4932 debug_printf ("Resuming, no pending status or step over needed\n");
d50171e4
PA
4933 }
4934
4935 /* Even if we're leaving threads stopped, queue all signals we'd
4936 otherwise deliver. */
c80825ff
SM
4937 for_each_thread ([&] (thread_info *thread)
4938 {
df95181f 4939 resume_one_thread (thread, leave_all_stopped);
c80825ff 4940 });
d50171e4
PA
4941
4942 if (need_step_over)
d86d4aaf 4943 start_step_over (get_thread_lwp (need_step_over));
87ce2a04
DE
4944
4945 if (debug_threads)
4946 {
4947 debug_printf ("linux_resume done\n");
4948 debug_exit ();
4949 }
1bebeeca
PA
4950
4951 /* We may have events that were pending that can/should be sent to
4952 the client now. Trigger a linux_wait call. */
4953 if (target_is_async_p ())
4954 async_file_mark ();
d50171e4
PA
4955}
4956
df95181f
TBA
4957void
4958linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
d50171e4 4959{
d86d4aaf 4960 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4961 int step;
4962
7984d532 4963 if (lwp == except)
e2b44075 4964 return;
d50171e4
PA
4965
4966 if (debug_threads)
d86d4aaf 4967 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4968
4969 if (!lwp->stopped)
4970 {
4971 if (debug_threads)
d86d4aaf 4972 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
e2b44075 4973 return;
d50171e4
PA
4974 }
4975
02fc4de7
PA
4976 if (thread->last_resume_kind == resume_stop
4977 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4978 {
4979 if (debug_threads)
87ce2a04 4980 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4981 lwpid_of (thread));
e2b44075 4982 return;
d50171e4
PA
4983 }
4984
4985 if (lwp->status_pending_p)
4986 {
4987 if (debug_threads)
87ce2a04 4988 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4989 lwpid_of (thread));
e2b44075 4990 return;
d50171e4
PA
4991 }
4992
7984d532
PA
4993 gdb_assert (lwp->suspended >= 0);
4994
d50171e4
PA
4995 if (lwp->suspended)
4996 {
4997 if (debug_threads)
d86d4aaf 4998 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
e2b44075 4999 return;
d50171e4
PA
5000 }
5001
1a981360
PA
5002 if (thread->last_resume_kind == resume_stop
5003 && lwp->pending_signals_to_report == NULL
229d26fc
SM
5004 && (lwp->collecting_fast_tracepoint
5005 == fast_tpoint_collect_result::not_collecting))
02fc4de7
PA
5006 {
5007 /* We haven't reported this LWP as stopped yet (otherwise, the
5008 last_status.kind check above would catch it, and we wouldn't
5009 reach here. This LWP may have been momentarily paused by a
5010 stop_all_lwps call while handling for example, another LWP's
5011 step-over. In that case, the pending expected SIGSTOP signal
5012 that was queued at vCont;t handling time will have already
5013 been consumed by wait_for_sigstop, and so we need to requeue
5014 another one here. Note that if the LWP already has a SIGSTOP
5015 pending, this is a no-op. */
5016
5017 if (debug_threads)
87ce2a04
DE
5018 debug_printf ("Client wants LWP %ld to stop. "
5019 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 5020 lwpid_of (thread));
02fc4de7
PA
5021
5022 send_sigstop (lwp);
5023 }
5024
863d01bd
PA
5025 if (thread->last_resume_kind == resume_step)
5026 {
5027 if (debug_threads)
5028 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5029 lwpid_of (thread));
8901d193 5030
3b9a79ef 5031 /* If resume_step is requested by GDB, install single-step
8901d193 5032 breakpoints when the thread is about to be actually resumed if
3b9a79ef 5033 the single-step breakpoints weren't removed. */
7582c77c 5034 if (supports_software_single_step ()
3b9a79ef 5035 && !has_single_step_breakpoints (thread))
8901d193
YQ
5036 install_software_single_step_breakpoints (lwp);
5037
5038 step = maybe_hw_step (thread);
863d01bd
PA
5039 }
5040 else if (lwp->bp_reinsert != 0)
5041 {
5042 if (debug_threads)
5043 debug_printf (" stepping LWP %ld, reinsert set\n",
5044 lwpid_of (thread));
f79b145d
YQ
5045
5046 step = maybe_hw_step (thread);
863d01bd
PA
5047 }
5048 else
5049 step = 0;
5050
df95181f 5051 resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
5052}
5053
df95181f
TBA
5054void
5055linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5056 lwp_info *except)
7984d532 5057{
d86d4aaf 5058 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5059
5060 if (lwp == except)
e2b44075 5061 return;
7984d532 5062
863d01bd 5063 lwp_suspended_decr (lwp);
7984d532 5064
e2b44075 5065 proceed_one_lwp (thread, except);
d50171e4
PA
5066}
5067
d16f3f6c
TBA
5068void
5069linux_process_target::proceed_all_lwps ()
d50171e4 5070{
d86d4aaf 5071 struct thread_info *need_step_over;
d50171e4
PA
5072
5073 /* If there is a thread which would otherwise be resumed, which is
5074 stopped at a breakpoint that needs stepping over, then don't
5075 resume any threads - have it step over the breakpoint with all
5076 other threads stopped, then resume all threads again. */
5077
bf9ae9d8 5078 if (low_supports_breakpoints ())
d50171e4 5079 {
df95181f
TBA
5080 need_step_over = find_thread ([this] (thread_info *thread)
5081 {
5082 return thread_needs_step_over (thread);
5083 });
d50171e4
PA
5084
5085 if (need_step_over != NULL)
5086 {
5087 if (debug_threads)
87ce2a04
DE
5088 debug_printf ("proceed_all_lwps: found "
5089 "thread %ld needing a step-over\n",
5090 lwpid_of (need_step_over));
d50171e4 5091
d86d4aaf 5092 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
5093 return;
5094 }
5095 }
5544ad89 5096
d50171e4 5097 if (debug_threads)
87ce2a04 5098 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 5099
df95181f 5100 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
5101 {
5102 proceed_one_lwp (thread, NULL);
5103 });
d50171e4
PA
5104}
5105
d16f3f6c
TBA
5106void
5107linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 5108{
5544ad89
DJ
5109 if (debug_threads)
5110 {
87ce2a04 5111 debug_enter ();
d50171e4 5112 if (except)
87ce2a04 5113 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5114 lwpid_of (get_lwp_thread (except)));
5544ad89 5115 else
87ce2a04 5116 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5117 }
5118
7984d532 5119 if (unsuspend)
e2b44075
SM
5120 for_each_thread ([&] (thread_info *thread)
5121 {
5122 unsuspend_and_proceed_one_lwp (thread, except);
5123 });
7984d532 5124 else
e2b44075
SM
5125 for_each_thread ([&] (thread_info *thread)
5126 {
5127 proceed_one_lwp (thread, except);
5128 });
87ce2a04
DE
5129
5130 if (debug_threads)
5131 {
5132 debug_printf ("unstop_all_lwps done\n");
5133 debug_exit ();
5134 }
0d62e5e8
DJ
5135}
5136
58caa3dc
DJ
5137
5138#ifdef HAVE_LINUX_REGSETS
5139
1faeff08
MR
5140#define use_linux_regsets 1
5141
030031ee
PA
5142/* Returns true if REGSET has been disabled. */
5143
5144static int
5145regset_disabled (struct regsets_info *info, struct regset_info *regset)
5146{
5147 return (info->disabled_regsets != NULL
5148 && info->disabled_regsets[regset - info->regsets]);
5149}
5150
5151/* Disable REGSET. */
5152
5153static void
5154disable_regset (struct regsets_info *info, struct regset_info *regset)
5155{
5156 int dr_offset;
5157
5158 dr_offset = regset - info->regsets;
5159 if (info->disabled_regsets == NULL)
224c3ddb 5160 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5161 info->disabled_regsets[dr_offset] = 1;
5162}
5163
58caa3dc 5164static int
3aee8918
PA
5165regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5166 struct regcache *regcache)
58caa3dc
DJ
5167{
5168 struct regset_info *regset;
e9d25b98 5169 int saw_general_regs = 0;
95954743 5170 int pid;
1570b33e 5171 struct iovec iov;
58caa3dc 5172
0bfdf32f 5173 pid = lwpid_of (current_thread);
28eef672 5174 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5175 {
1570b33e
L
5176 void *buf, *data;
5177 int nt_type, res;
58caa3dc 5178
030031ee 5179 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5180 continue;
58caa3dc 5181
bca929d3 5182 buf = xmalloc (regset->size);
1570b33e
L
5183
5184 nt_type = regset->nt_type;
5185 if (nt_type)
5186 {
5187 iov.iov_base = buf;
5188 iov.iov_len = regset->size;
5189 data = (void *) &iov;
5190 }
5191 else
5192 data = buf;
5193
dfb64f85 5194#ifndef __sparc__
f15f9948 5195 res = ptrace (regset->get_request, pid,
b8e1b30e 5196 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5197#else
1570b33e 5198 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5199#endif
58caa3dc
DJ
5200 if (res < 0)
5201 {
1ef53e6b
AH
5202 if (errno == EIO
5203 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5204 {
1ef53e6b
AH
5205 /* If we get EIO on a regset, or an EINVAL and the regset is
5206 optional, do not try it again for this process mode. */
030031ee 5207 disable_regset (regsets_info, regset);
58caa3dc 5208 }
e5a9158d
AA
5209 else if (errno == ENODATA)
5210 {
5211 /* ENODATA may be returned if the regset is currently
5212 not "active". This can happen in normal operation,
5213 so suppress the warning in this case. */
5214 }
fcd4a73d
YQ
5215 else if (errno == ESRCH)
5216 {
5217 /* At this point, ESRCH should mean the process is
5218 already gone, in which case we simply ignore attempts
5219 to read its registers. */
5220 }
58caa3dc
DJ
5221 else
5222 {
0d62e5e8 5223 char s[256];
95954743
PA
5224 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5225 pid);
0d62e5e8 5226 perror (s);
58caa3dc
DJ
5227 }
5228 }
098dbe61
AA
5229 else
5230 {
5231 if (regset->type == GENERAL_REGS)
5232 saw_general_regs = 1;
5233 regset->store_function (regcache, buf);
5234 }
fdeb2a12 5235 free (buf);
58caa3dc 5236 }
e9d25b98
DJ
5237 if (saw_general_regs)
5238 return 0;
5239 else
5240 return 1;
58caa3dc
DJ
5241}
5242
5243static int
3aee8918
PA
5244regsets_store_inferior_registers (struct regsets_info *regsets_info,
5245 struct regcache *regcache)
58caa3dc
DJ
5246{
5247 struct regset_info *regset;
e9d25b98 5248 int saw_general_regs = 0;
95954743 5249 int pid;
1570b33e 5250 struct iovec iov;
58caa3dc 5251
0bfdf32f 5252 pid = lwpid_of (current_thread);
28eef672 5253 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5254 {
1570b33e
L
5255 void *buf, *data;
5256 int nt_type, res;
58caa3dc 5257
feea5f36
AA
5258 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5259 || regset->fill_function == NULL)
28eef672 5260 continue;
58caa3dc 5261
bca929d3 5262 buf = xmalloc (regset->size);
545587ee
DJ
5263
5264 /* First fill the buffer with the current register set contents,
5265 in case there are any items in the kernel's regset that are
5266 not in gdbserver's regcache. */
1570b33e
L
5267
5268 nt_type = regset->nt_type;
5269 if (nt_type)
5270 {
5271 iov.iov_base = buf;
5272 iov.iov_len = regset->size;
5273 data = (void *) &iov;
5274 }
5275 else
5276 data = buf;
5277
dfb64f85 5278#ifndef __sparc__
f15f9948 5279 res = ptrace (regset->get_request, pid,
b8e1b30e 5280 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5281#else
689cc2ae 5282 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5283#endif
545587ee
DJ
5284
5285 if (res == 0)
5286 {
5287 /* Then overlay our cached registers on that. */
442ea881 5288 regset->fill_function (regcache, buf);
545587ee
DJ
5289
5290 /* Only now do we write the register set. */
dfb64f85 5291#ifndef __sparc__
f15f9948 5292 res = ptrace (regset->set_request, pid,
b8e1b30e 5293 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5294#else
1570b33e 5295 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 5296#endif
545587ee
DJ
5297 }
5298
58caa3dc
DJ
5299 if (res < 0)
5300 {
1ef53e6b
AH
5301 if (errno == EIO
5302 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5303 {
1ef53e6b
AH
5304 /* If we get EIO on a regset, or an EINVAL and the regset is
5305 optional, do not try it again for this process mode. */
030031ee 5306 disable_regset (regsets_info, regset);
58caa3dc 5307 }
3221518c
UW
5308 else if (errno == ESRCH)
5309 {
1b3f6016
PA
5310 /* At this point, ESRCH should mean the process is
5311 already gone, in which case we simply ignore attempts
5312 to change its registers. See also the related
df95181f 5313 comment in resume_one_lwp. */
fdeb2a12 5314 free (buf);
3221518c
UW
5315 return 0;
5316 }
58caa3dc
DJ
5317 else
5318 {
ce3a066d 5319 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
5320 }
5321 }
e9d25b98
DJ
5322 else if (regset->type == GENERAL_REGS)
5323 saw_general_regs = 1;
09ec9b38 5324 free (buf);
58caa3dc 5325 }
e9d25b98
DJ
5326 if (saw_general_regs)
5327 return 0;
5328 else
5329 return 1;
58caa3dc
DJ
5330}
5331
1faeff08 5332#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5333
1faeff08 5334#define use_linux_regsets 0
3aee8918
PA
5335#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5336#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5337
58caa3dc 5338#endif
1faeff08
MR
5339
5340/* Return 1 if register REGNO is supported by one of the regset ptrace
5341 calls or 0 if it has to be transferred individually. */
5342
5343static int
3aee8918 5344linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5345{
5346 unsigned char mask = 1 << (regno % 8);
5347 size_t index = regno / 8;
5348
5349 return (use_linux_regsets
3aee8918
PA
5350 && (regs_info->regset_bitmap == NULL
5351 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5352}
5353
58caa3dc 5354#ifdef HAVE_LINUX_USRREGS
1faeff08 5355
5b3da067 5356static int
3aee8918 5357register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5358{
5359 int addr;
5360
3aee8918 5361 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5362 error ("Invalid register number %d.", regnum);
5363
3aee8918 5364 addr = usrregs->regmap[regnum];
1faeff08
MR
5365
5366 return addr;
5367}
5368
daca57a7
TBA
5369
5370void
5371linux_process_target::fetch_register (const usrregs_info *usrregs,
5372 regcache *regcache, int regno)
1faeff08
MR
5373{
5374 CORE_ADDR regaddr;
5375 int i, size;
5376 char *buf;
5377 int pid;
5378
3aee8918 5379 if (regno >= usrregs->num_regs)
1faeff08 5380 return;
daca57a7 5381 if (low_cannot_fetch_register (regno))
1faeff08
MR
5382 return;
5383
3aee8918 5384 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5385 if (regaddr == -1)
5386 return;
5387
3aee8918
PA
5388 size = ((register_size (regcache->tdesc, regno)
5389 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5390 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5391 buf = (char *) alloca (size);
1faeff08 5392
0bfdf32f 5393 pid = lwpid_of (current_thread);
1faeff08
MR
5394 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5395 {
5396 errno = 0;
5397 *(PTRACE_XFER_TYPE *) (buf + i) =
5398 ptrace (PTRACE_PEEKUSER, pid,
5399 /* Coerce to a uintptr_t first to avoid potential gcc warning
5400 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5401 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
5402 regaddr += sizeof (PTRACE_XFER_TYPE);
5403 if (errno != 0)
9a70f35c
YQ
5404 {
5405 /* Mark register REGNO unavailable. */
5406 supply_register (regcache, regno, NULL);
5407 return;
5408 }
1faeff08
MR
5409 }
5410
5411 if (the_low_target.supply_ptrace_register)
5412 the_low_target.supply_ptrace_register (regcache, regno, buf);
5413 else
5414 supply_register (regcache, regno, buf);
5415}
5416
daca57a7
TBA
5417void
5418linux_process_target::store_register (const usrregs_info *usrregs,
5419 regcache *regcache, int regno)
1faeff08
MR
5420{
5421 CORE_ADDR regaddr;
5422 int i, size;
5423 char *buf;
5424 int pid;
5425
3aee8918 5426 if (regno >= usrregs->num_regs)
1faeff08 5427 return;
daca57a7 5428 if (low_cannot_store_register (regno))
1faeff08
MR
5429 return;
5430
3aee8918 5431 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5432 if (regaddr == -1)
5433 return;
5434
3aee8918
PA
5435 size = ((register_size (regcache->tdesc, regno)
5436 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5437 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5438 buf = (char *) alloca (size);
1faeff08
MR
5439 memset (buf, 0, size);
5440
5441 if (the_low_target.collect_ptrace_register)
5442 the_low_target.collect_ptrace_register (regcache, regno, buf);
5443 else
5444 collect_register (regcache, regno, buf);
5445
0bfdf32f 5446 pid = lwpid_of (current_thread);
1faeff08
MR
5447 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5448 {
5449 errno = 0;
5450 ptrace (PTRACE_POKEUSER, pid,
5451 /* Coerce to a uintptr_t first to avoid potential gcc warning
5452 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5453 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5454 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
5455 if (errno != 0)
5456 {
5457 /* At this point, ESRCH should mean the process is
5458 already gone, in which case we simply ignore attempts
5459 to change its registers. See also the related
df95181f 5460 comment in resume_one_lwp. */
1faeff08
MR
5461 if (errno == ESRCH)
5462 return;
5463
daca57a7
TBA
5464
5465 if (!low_cannot_store_register (regno))
6d91ce9a 5466 error ("writing register %d: %s", regno, safe_strerror (errno));
1faeff08
MR
5467 }
5468 regaddr += sizeof (PTRACE_XFER_TYPE);
5469 }
5470}
daca57a7 5471#endif /* HAVE_LINUX_USRREGS */
1faeff08 5472
daca57a7
TBA
5473void
5474linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5475 regcache *regcache,
5476 int regno, int all)
1faeff08 5477{
daca57a7 5478#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5479 struct usrregs_info *usr = regs_info->usrregs;
5480
1faeff08
MR
5481 if (regno == -1)
5482 {
3aee8918
PA
5483 for (regno = 0; regno < usr->num_regs; regno++)
5484 if (all || !linux_register_in_regsets (regs_info, regno))
5485 fetch_register (usr, regcache, regno);
1faeff08
MR
5486 }
5487 else
3aee8918 5488 fetch_register (usr, regcache, regno);
daca57a7 5489#endif
1faeff08
MR
5490}
5491
daca57a7
TBA
5492void
5493linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5494 regcache *regcache,
5495 int regno, int all)
1faeff08 5496{
daca57a7 5497#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5498 struct usrregs_info *usr = regs_info->usrregs;
5499
1faeff08
MR
5500 if (regno == -1)
5501 {
3aee8918
PA
5502 for (regno = 0; regno < usr->num_regs; regno++)
5503 if (all || !linux_register_in_regsets (regs_info, regno))
5504 store_register (usr, regcache, regno);
1faeff08
MR
5505 }
5506 else
3aee8918 5507 store_register (usr, regcache, regno);
58caa3dc 5508#endif
daca57a7 5509}
1faeff08 5510
a5a4d4cd
TBA
5511void
5512linux_process_target::fetch_registers (regcache *regcache, int regno)
1faeff08
MR
5513{
5514 int use_regsets;
5515 int all = 0;
aa8d21c9 5516 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5517
5518 if (regno == -1)
5519 {
bd70b1f2 5520 if (regs_info->usrregs != NULL)
3aee8918 5521 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
bd70b1f2 5522 low_fetch_register (regcache, regno);
c14dfd32 5523
3aee8918
PA
5524 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5525 if (regs_info->usrregs != NULL)
5526 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5527 }
5528 else
5529 {
bd70b1f2 5530 if (low_fetch_register (regcache, regno))
c14dfd32
PA
5531 return;
5532
3aee8918 5533 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5534 if (use_regsets)
3aee8918
PA
5535 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5536 regcache);
5537 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5538 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5539 }
58caa3dc
DJ
5540}
5541
a5a4d4cd
TBA
5542void
5543linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5544{
1faeff08
MR
5545 int use_regsets;
5546 int all = 0;
aa8d21c9 5547 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5548
5549 if (regno == -1)
5550 {
3aee8918
PA
5551 all = regsets_store_inferior_registers (regs_info->regsets_info,
5552 regcache);
5553 if (regs_info->usrregs != NULL)
5554 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5555 }
5556 else
5557 {
3aee8918 5558 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5559 if (use_regsets)
3aee8918
PA
5560 all = regsets_store_inferior_registers (regs_info->regsets_info,
5561 regcache);
5562 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5563 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5564 }
58caa3dc
DJ
5565}
5566
bd70b1f2
TBA
5567bool
5568linux_process_target::low_fetch_register (regcache *regcache, int regno)
5569{
5570 return false;
5571}
da6d8c04 5572
e2558df3 5573/* A wrapper for the read_memory target op. */
da6d8c04 5574
c3e735a6 5575static int
f450004a 5576linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5577{
52405d85 5578 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5579}
5580
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success,
   or an errno value on failure.  Tries the fast /proc/PID/mem path
   first and falls back to word-at-a-time PTRACE_PEEKTEXT.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  PTRACE_PEEKTEXT reports errors through
     errno, so clear it first and stop at the first failing word.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer: even on failure, hand
     back whatever prefix was successfully read.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5669
93ae6fdc
PA
5670/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5671 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5672 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5673
e2558df3
TBA
5674int
5675linux_process_target::write_memory (CORE_ADDR memaddr,
5676 const unsigned char *myaddr, int len)
da6d8c04 5677{
ae3e2ccf 5678 int i;
da6d8c04 5679 /* Round starting address down to longword boundary. */
ae3e2ccf 5680 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
da6d8c04 5681 /* Round ending address up; get number of longwords that makes. */
ae3e2ccf 5682 int count
493e2a69
MS
5683 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5684 / sizeof (PTRACE_XFER_TYPE);
5685
da6d8c04 5686 /* Allocate buffer of that many longwords. */
ae3e2ccf 5687 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
493e2a69 5688
0bfdf32f 5689 int pid = lwpid_of (current_thread);
da6d8c04 5690
f0ae6fc3
PA
5691 if (len == 0)
5692 {
5693 /* Zero length write always succeeds. */
5694 return 0;
5695 }
5696
0d62e5e8
DJ
5697 if (debug_threads)
5698 {
58d6951d 5699 /* Dump up to four bytes. */
bf47e248
PA
5700 char str[4 * 2 + 1];
5701 char *p = str;
5702 int dump = len < 4 ? len : 4;
5703
5704 for (i = 0; i < dump; i++)
5705 {
5706 sprintf (p, "%02x", myaddr[i]);
5707 p += 2;
5708 }
5709 *p = '\0';
5710
5711 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5712 str, (long) memaddr, pid);
0d62e5e8
DJ
5713 }
5714
da6d8c04
DJ
5715 /* Fill start and end extra bytes of buffer with existing memory data. */
5716
93ae6fdc 5717 errno = 0;
14ce3065
DE
5718 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5719 about coercing an 8 byte integer to a 4 byte pointer. */
5720 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5721 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5722 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5723 if (errno)
5724 return errno;
da6d8c04
DJ
5725
5726 if (count > 1)
5727 {
93ae6fdc 5728 errno = 0;
da6d8c04 5729 buffer[count - 1]
95954743 5730 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5731 /* Coerce to a uintptr_t first to avoid potential gcc warning
5732 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5733 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5734 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5735 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5736 if (errno)
5737 return errno;
da6d8c04
DJ
5738 }
5739
93ae6fdc 5740 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5741
493e2a69
MS
5742 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5743 myaddr, len);
da6d8c04
DJ
5744
5745 /* Write the entire buffer. */
5746
5747 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5748 {
5749 errno = 0;
14ce3065
DE
5750 ptrace (PTRACE_POKETEXT, pid,
5751 /* Coerce to a uintptr_t first to avoid potential gcc warning
5752 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5753 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5754 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5755 if (errno)
5756 return errno;
5757 }
5758
5759 return 0;
5760}
2f2893d9 5761
/* Hook for new-symbols notification from GDB.  When libthread_db
   support is compiled in, (re)try initializing thread_db for the
   current process; once it is up, there is nothing more to do.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process -- nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5774
/* Interrupt the inferior on behalf of a remote ^C request.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  Note the negative pid:
     the signal goes to every process in signal_pid's group.  */
  ::kill (-signal_pid, SIGINT);
}
5782
/* The Linux target can always read the inferior's auxiliary vector
   (via /proc/PID/auxv).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5788
aa691b87
RM
5789/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5790 to debugger memory starting at MYADDR. */
5791
eac215cc
TBA
5792int
5793linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5794 unsigned int len)
aa691b87
RM
5795{
5796 char filename[PATH_MAX];
5797 int fd, n;
0bfdf32f 5798 int pid = lwpid_of (current_thread);
aa691b87 5799
6cebaf6e 5800 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5801
5802 fd = open (filename, O_RDONLY);
5803 if (fd < 0)
5804 return -1;
5805
5806 if (offset != (CORE_ADDR) 0
5807 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5808 n = -1;
5809 else
5810 n = read (fd, myaddr, len);
5811
5812 close (fd);
5813
5814 return n;
5815}
5816
7e0bde70
TBA
5817int
5818linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5819 int size, raw_breakpoint *bp)
e013ee27 5820{
c8f4bfdd
YQ
5821 if (type == raw_bkpt_type_sw)
5822 return insert_memory_breakpoint (bp);
e013ee27 5823 else
9db9aa23
TBA
5824 return low_insert_point (type, addr, size, bp);
5825}
5826
/* Default implementation of the low_insert_point hook.  Returning 1
   means "unsupported" (see target.h); architectures with hardware
   breakpoint/watchpoint support override this.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5834
7e0bde70
TBA
5835int
5836linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5837 int size, raw_breakpoint *bp)
e013ee27 5838{
c8f4bfdd
YQ
5839 if (type == raw_bkpt_type_sw)
5840 return remove_memory_breakpoint (bp);
e013ee27 5841 else
9db9aa23
TBA
5842 return low_remove_point (type, addr, size, bp);
5843}
5844
/* Default implementation of the low_remove_point hook.  Returning 1
   means "unsupported" (see target.h); architectures with hardware
   breakpoint/watchpoint support override this.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5852
/* Implement the stopped_by_sw_breakpoint target_ops
   method.  Reports whether the current LWP's last stop was caused by
   a software breakpoint, as recorded in its stop_reason.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}
5863
/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  Only meaningful when siginfo-based SIGTRAP
   classification is available on this kernel/arch.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5872
/* Implement the stopped_by_hw_breakpoint target_ops
   method.  Reports whether the current LWP's last stop was caused by
   a hardware breakpoint, as recorded in its stop_reason.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}
5883
/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  Only meaningful when siginfo-based SIGTRAP
   classification is available on this kernel/arch.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5892
/* Implement the supports_hardware_single_step target_ops method.
   Defers to the architecture-level can_hardware_single_step query.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return can_hardware_single_step ();
}
5900
6eeb5c55
TBA
5901bool
5902linux_process_target::stopped_by_watchpoint ()
e013ee27 5903{
0bfdf32f 5904 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5905
15c66dd6 5906 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5907}
5908
6eeb5c55
TBA
5909CORE_ADDR
5910linux_process_target::stopped_data_address ()
e013ee27 5911{
0bfdf32f 5912 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5913
5914 return lwp->stopped_data_address;
e013ee27
OF
5915}
5916
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5931
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 and fills *TEXT_P/*DATA_P on success,
   0 on failure.  Only compiled in when SUPPORTS_READ_OFFSETS.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER signals failure through errno, so clear it before
     the sequence and check it once afterwards.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5972
/* TLS address lookup is only available when thread_db support is
   compiled in.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
5982
/* Look up the address of a TLS variable for THREAD given its module
   OFFSET and LOAD_MODULE, storing the result in *ADDRESS.  Delegates
   to libthread_db; returns -1 when thread_db support is absent.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5995
/* The Linux target always supports the qXfer:osdata packet.  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}
6001
/* Serve a qXfer:osdata read by delegating to the shared Linux OS-data
   code.  WRITEBUF is unused: osdata is read-only.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
6010
d0722149
DE
6011/* Convert a native/host siginfo object, into/from the siginfo in the
6012 layout of the inferiors' architecture. */
6013
6014static void
8adce034 6015siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
d0722149
DE
6016{
6017 int done = 0;
6018
6019 if (the_low_target.siginfo_fixup != NULL)
6020 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6021
6022 /* If there was no callback, or the callback didn't do anything,
6023 then just do a straight memcpy. */
6024 if (!done)
6025 {
6026 if (direction == 1)
a5362b9a 6027 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6028 else
a5362b9a 6029 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6030 }
6031}
6032
/* The Linux target always supports the qXfer:siginfo packet.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
6038
/* Serve a qXfer:siginfo read or write for the current thread.  READBUF
   non-NULL means read, otherwise WRITEBUF is written.  Returns the
   number of bytes transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the size of the siginfo object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6089
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Everything called here must be
   async-signal-safe, hence debug_write rather than debug_printf, and
   errno is saved and restored around the body.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6115
/* The Linux target supports non-stop mode.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
6121
/* Enable or disable async (non-stop) mode.  Returns the previous
   enablement state so callers can restore it.  SIGCHLD is blocked for
   the whole transition so the handler cannot observe the event pipe
   half-initialized.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* The pipe is used as a wakeup token, not a data channel, so
	     both ends are non-blocking.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6176
0dc587d4
TBA
6177int
6178linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
6179{
6180 /* Register or unregister from event-loop accordingly. */
0dc587d4 6181 target_async (nonstop);
aa96c426 6182
0dc587d4 6183 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
6184 return -1;
6185
bd99dc85
PA
6186 return 0;
6187}
6188
/* The Linux target supports debugging multiple processes at once.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}
6194
/* Check if fork events are supported.  Depends on the kernel's
   ptrace fork-tracing capability.  */

bool
linux_process_target::supports_fork_events ()
{
  return linux_supports_tracefork ();
}
6202
/* Check if vfork events are supported.  Uses the same kernel
   capability probe as fork events.  */

bool
linux_process_target::supports_vfork_events ()
{
  return linux_supports_tracefork ();
}
6210
/* Check if exec events are supported.  Depends on the kernel's
   ptrace exec-tracing capability.  */

bool
linux_process_target::supports_exec_events ()
{
  return linux_supports_traceexec ();
}
6218
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are applied later, when the stop is processed.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
6248
/* Handle a 'monitor' command from GDB.  Delegates to the thread_db
   layer when available; returns 0 (not handled) otherwise.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
6258
/* Report which CPU core PTID last ran on, via the shared Linux
   helper.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6264
/* Address-space randomization can only be disabled when the
   personality(2) interface is available.  */

bool
linux_process_target::supports_disable_randomization ()
{
#ifdef HAVE_PERSONALITY
  return true;
#else
  return false;
#endif
}
efcbbd14 6274
/* The Linux target supports the in-process agent.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6280
2526e0cd
TBA
6281bool
6282linux_process_target::supports_range_stepping ()
c2d6af84 6283{
7582c77c 6284 if (supports_software_single_step ())
2526e0cd 6285 return true;
c2d6af84 6286 if (*the_low_target.supports_range_stepping == NULL)
2526e0cd 6287 return false;
c2d6af84
PA
6288
6289 return (*the_low_target.supports_range_stepping) ();
6290}
6291
/* The Linux target can always map a pid to its executable path
   (via /proc).  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
6297
/* Return the path of PID's executable, via the /proc-based helper.  */

char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6303
/* The Linux target supports per-process filesystem views (mount
   namespaces).  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}
6309
/* Open FILENAME in PID's mount namespace.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}
6316
/* Unlink FILENAME in PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}
6322
/* Read the target of symlink FILENAME in PID's mount namespace.  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6329
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* Layouts of the loadmap data the kernel hands back for no-MMU
   (DSBT/FDPIC) targets; which variant applies is decided at compile
   time by which ptrace request the kernel headers define.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP PT_GETDSBT
#  define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 6371
/* Loadmap reading is supported whenever this block is compiled in
   (PT_GETDSBT or PTRACE_GETFDPIC defined).  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6377
6378int
6379linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6380 unsigned char *myaddr, unsigned int len)
78d85199 6381{
0bfdf32f 6382 int pid = lwpid_of (current_thread);
78d85199
YQ
6383 int addr = -1;
6384 struct target_loadmap *data = NULL;
6385 unsigned int actual_length, copy_length;
6386
6387 if (strcmp (annex, "exec") == 0)
723b724b 6388 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6389 else if (strcmp (annex, "interp") == 0)
723b724b 6390 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6391 else
6392 return -1;
6393
723b724b 6394 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6395 return -1;
6396
6397 if (data == NULL)
6398 return -1;
6399
6400 actual_length = sizeof (struct target_loadmap)
6401 + sizeof (struct target_loadseg) * data->nsegs;
6402
6403 if (offset < 0 || offset > actual_length)
6404 return -1;
6405
6406 copy_length = actual_length - offset < len ? actual_length - offset : len;
6407 memcpy (myaddr, (char *) data + offset, copy_length);
6408 return copy_length;
6409}
723b724b 6410#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6411
/* Let the architecture backend inspect the qSupported features GDB
   announced, if it provides a hook for that.  */

void
linux_process_target::process_qsupported (char **features, int count)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (features, count);
}
6418
/* Syscall catchpoints require both an architecture hook to decode the
   syscall trap and kernel support for PTRACE_O_TRACESYSGOOD.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return (the_low_target.get_syscall_trapinfo != NULL
	  && linux_supports_tracesysgood ());
}
6425
d633e831
TBA
6426int
6427linux_process_target::get_ipa_tdesc_idx ()
ae91f625
MK
6428{
6429 if (the_low_target.get_ipa_tdesc_idx == NULL)
6430 return 0;
6431
6432 return (*the_low_target.get_ipa_tdesc_idx) ();
6433}
6434
290732bf
TBA
6435bool
6436linux_process_target::supports_tracepoints ()
219f2f23
PA
6437{
6438 if (*the_low_target.supports_tracepoints == NULL)
290732bf 6439 return false;
219f2f23
PA
6440
6441 return (*the_low_target.supports_tracepoints) ();
6442}
6443
/* Read the program counter from REGCACHE.  Returns 0 when the
   architecture provides no PC access (no breakpoint support).  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}
6452
/* Write PC into REGCACHE.  Only valid when the architecture supports
   PC access, which the assertion enforces.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6460
/* The Linux target can report per-thread stopped state.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}
6466
/* Report whether THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6472
/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}
6480
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6489
/* Pause all threads before a memory access in non-stop mode.
   Returns 0 (success).  */

int
linux_process_target::prepare_to_access_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_pause_all (true);
  return 0;
}
6499
/* Undo prepare_to_access_memory: resume the threads paused for the
   memory access in non-stop mode.  */

void
linux_process_target::done_accessing_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_unpause_all (true);
}
6508
/* Fast tracepoints are available iff the architecture provides a
   jump-pad installer.  */

bool
linux_process_target::supports_fast_tracepoints ()
{
  return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
}
6514
/* Install a fast-tracepoint jump pad by delegating to the
   architecture hook.  Only callable when supports_fast_tracepoints
   returned true.  All out-parameters and the error buffer ERR are
   filled in by the hook.  */

int
linux_process_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
6531
345dafad
TBA
6532emit_ops *
6533linux_process_target::emit_ops ()
6a271cae
PA
6534{
6535 if (the_low_target.emit_ops != NULL)
6536 return (*the_low_target.emit_ops) ();
6537 else
6538 return NULL;
6539}
6540
/* Return the minimum instruction length a fast tracepoint may overwrite,
   as reported by the architecture hook.  NOTE(review): unlike the other
   wrappers here, the hook pointer is not NULL-checked -- presumably only
   reached when fast tracepoints are supported; confirm against callers.  */

int
linux_process_target::get_min_fast_tracepoint_insn_len ()
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
6546
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   non-zero on failure.  Scans /proc/PID/auxv for the AT_PHDR and
   AT_PHNUM entries, using 64- or 32-bit auxv records as indicated by
   IS_ELF64.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Stop as soon as both entries have been seen.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6612
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Reads the program headers out of inferior memory, computes the load
   relocation from the PT_PHDR entry (0 for non-PIE executables), and
   applies it to the PT_DYNAMIC vaddr.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6686
6687/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6688 can be 0 if the inferior does not yet have the library list initialized.
6689 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6690 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6691
static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  /* MAP stays -1 (no DT_DEBUG found) unless we see a DT_DEBUG entry;
     it may legitimately end up 0 if the dynamic linker has not filled
     it in yet.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the inferior's dynamic section one entry at a time until we
     hit DT_NULL, a read failure, or one of the MIPS rld-map tags.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Union used to read a pointer-sized value byte-wise from the
	     inferior without aliasing issues.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* On MIPS, DT_MIPS_RLD_MAP holds the *address of* a pointer
	     to the r_debug structure; dereference it.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic section's
	     load address rather than absolute.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember the first DT_DEBUG seen, but keep scanning in case
	     a MIPS rld-map tag appears later.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  /* 32-bit variant of the logic above.  */
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6788
6789/* Read one pointer from MEMADDR in the inferior. */
6790
6791static int
6792read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6793{
485f1ee4
PA
6794 int ret;
6795
6796 /* Go through a union so this works on either big or little endian
6797 hosts, when the inferior's pointer size is smaller than the size
6798 of CORE_ADDR. It is assumed the inferior's endianness is the
6799 same of the superior's. */
6800 union
6801 {
6802 CORE_ADDR core_addr;
6803 unsigned int ui;
6804 unsigned char uc;
6805 } addr;
6806
6807 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6808 if (ret == 0)
6809 {
6810 if (ptr_size == sizeof (CORE_ADDR))
6811 *ptr = addr.core_addr;
6812 else if (ptr_size == sizeof (unsigned int))
6813 *ptr = addr.ui;
6814 else
6815 gdb_assert_not_reached ("unhandled pointer size");
6816 }
6817 return ret;
2268b414
JK
6818}
6819
974387bb
TBA
/* This target always supports the qXfer:libraries-svr4:read packet.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6825
2268b414
JK
/* Field offsets (in bytes) within the inferior's `struct r_debug' and
   `struct link_map', for one pointer-size flavor (32- or 64-bit).  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6849
fb723180 6850/* Construct qXfer:libraries-svr4:read reply. */
2268b414 6851
974387bb
TBA
/* Construct a qXfer:libraries-svr4:read reply in READBUF.  ANNEX may
   carry "start=ADDR" / "prev=ADDR" arguments selecting a sub-list of
   the inferior's link map.  Returns the number of bytes written, -1 on
   error, -2 if a write was requested (unsupported).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  /* Hard-coded SVR4 layout for 32-bit inferiors.  */
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  /* Hard-coded SVR4 layout for 64-bit inferiors.  */
  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  /* Writes are not supported; reads require a buffer.  */
  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit offsets from the executable's ELF class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse the annex: a sequence of NAME=HEXADDR; pairs.  Only "start"
     and "prev" are recognized; others are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown argument; skip to the next semicolon-separated
	     pair.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      /* No explicit start address: locate the head of the link map via
	 r_debug, caching its address in the per-process data.  */
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link map, emitting one <library> element per entry with a
     readable name.  Stop on any read failure or list corruption.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Sanity-check the doubly-linked list: the entry's l_prev must
	 point at the node we came from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Serve the requested [offset, offset+len) window of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
7039
9accd112
MM
7040#ifdef HAVE_LINUX_BTRACE
7041
79597bdd
TBA
/* See to_enable_btrace target method.  Delegates to the native
   linux_enable_btrace implementation.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}
7048
969c39fb 7049/* See to_disable_btrace target method. */
9accd112 7050
79597bdd
TBA
7051int
7052linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
7053{
7054 enum btrace_error err;
7055
7056 err = linux_disable_btrace (tinfo);
7057 return (err == BTRACE_ERR_NONE ? 0 : -1);
7058}
7059
bc504a31 7060/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7061
7062static void
7063linux_low_encode_pt_config (struct buffer *buffer,
7064 const struct btrace_data_pt_config *config)
7065{
7066 buffer_grow_str (buffer, "<pt-config>\n");
7067
7068 switch (config->cpu.vendor)
7069 {
7070 case CV_INTEL:
7071 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7072 "model=\"%u\" stepping=\"%u\"/>\n",
7073 config->cpu.family, config->cpu.model,
7074 config->cpu.stepping);
7075 break;
7076
7077 default:
7078 break;
7079 }
7080
7081 buffer_grow_str (buffer, "</pt-config>\n");
7082}
7083
7084/* Encode a raw buffer. */
7085
7086static void
7087linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7088 unsigned int size)
7089{
7090 if (size == 0)
7091 return;
7092
268a13a5 7093 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
7094 buffer_grow_str (buffer, "<raw>\n");
7095
7096 while (size-- > 0)
7097 {
7098 char elem[2];
7099
7100 elem[0] = tohex ((*data >> 4) & 0xf);
7101 elem[1] = tohex (*data++ & 0xf);
7102
7103 buffer_grow (buffer, elem, 2);
7104 }
7105
7106 buffer_grow_str (buffer, "</raw>\n");
7107}
7108
969c39fb
MM
/* See to_read_btrace target method.  Reads branch trace data of TYPE
   for TINFO and renders it as XML into BUFFER.  Returns 0 on success,
   -1 on error (with an E.* message in BUFFER).  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report the failure as a textual error reply.  */
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Render the trace according to its format.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      /* Branch Trace Store: a list of (begin, end) code blocks.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* Intel Processor Trace: CPU config plus raw hex-encoded data.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
f4abbc16
MM
7168
/* See to_btrace_conf target method.  Renders the branch-trace
   configuration of TINFO as a <btrace-conf> XML document in BUFFER.
   Always returns 0.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  /* No trace configured; emit an empty document.  */
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
9accd112
MM
7205#endif /* HAVE_LINUX_BTRACE */
7206
7b669087
GB
7207/* See nat/linux-nat.h. */
7208
7209ptid_t
7210current_lwp_ptid (void)
7211{
7212 return ptid_of (current_thread);
7213}
7214
7f63b89b
TBA
/* Fetch the name of thread THREAD from /proc via
   linux_proc_tid_get_name.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
7220
#if USE_THREAD_DB
/* Fetch the thread-library handle for PTID via libthread_db; on
   success store it in *HANDLE with its length in *HANDLE_LEN.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7229
276d4552
YQ
7230/* Default implementation of linux_target_ops method "set_pc" for
7231 32-bit pc register which is literally named "pc". */
7232
7233void
7234linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7235{
7236 uint32_t newpc = pc;
7237
7238 supply_register_by_name (regcache, "pc", &newpc);
7239}
7240
7241/* Default implementation of linux_target_ops method "get_pc" for
7242 32-bit pc register which is literally named "pc". */
7243
7244CORE_ADDR
7245linux_get_pc_32bit (struct regcache *regcache)
7246{
7247 uint32_t pc;
7248
7249 collect_register_by_name (regcache, "pc", &pc);
7250 if (debug_threads)
7251 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7252 return pc;
7253}
7254
6f69e520
YQ
7255/* Default implementation of linux_target_ops method "set_pc" for
7256 64-bit pc register which is literally named "pc". */
7257
7258void
7259linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7260{
7261 uint64_t newpc = pc;
7262
7263 supply_register_by_name (regcache, "pc", &newpc);
7264}
7265
7266/* Default implementation of linux_target_ops method "get_pc" for
7267 64-bit pc register which is literally named "pc". */
7268
7269CORE_ADDR
7270linux_get_pc_64bit (struct regcache *regcache)
7271{
7272 uint64_t pc;
7273
7274 collect_register_by_name (regcache, "pc", &pc);
7275 if (debug_threads)
7276 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7277 return pc;
7278}
7279
0570503d 7280/* See linux-low.h. */
974c89e0 7281
0570503d
PFC
7282int
7283linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7284{
7285 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7286 int offset = 0;
7287
7288 gdb_assert (wordsize == 4 || wordsize == 8);
7289
52405d85 7290 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7291 {
7292 if (wordsize == 4)
7293 {
0570503d 7294 uint32_t *data_p = (uint32_t *) data;
974c89e0 7295 if (data_p[0] == match)
0570503d
PFC
7296 {
7297 *valp = data_p[1];
7298 return 1;
7299 }
974c89e0
AH
7300 }
7301 else
7302 {
0570503d 7303 uint64_t *data_p = (uint64_t *) data;
974c89e0 7304 if (data_p[0] == match)
0570503d
PFC
7305 {
7306 *valp = data_p[1];
7307 return 1;
7308 }
974c89e0
AH
7309 }
7310
7311 offset += 2 * wordsize;
7312 }
7313
7314 return 0;
7315}
7316
7317/* See linux-low.h. */
7318
7319CORE_ADDR
7320linux_get_hwcap (int wordsize)
7321{
0570503d
PFC
7322 CORE_ADDR hwcap = 0;
7323 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7324 return hwcap;
974c89e0
AH
7325}
7326
7327/* See linux-low.h. */
7328
7329CORE_ADDR
7330linux_get_hwcap2 (int wordsize)
7331{
0570503d
PFC
7332 CORE_ADDR hwcap2 = 0;
7333 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7334 return hwcap2;
974c89e0 7335}
6f69e520 7336
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO->regsets — the array is terminated by an
   entry with a negative size — and record the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7347
da6d8c04
DJ
/* One-time initialization of the Linux low-level target: install the
   target vector, emit any ptrace/proc capability warnings, set up the
   SIGCHLD handler, and initialize the architecture-specific layer.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* Install the SIGCHLD handler with SA_RESTART so interrupted
     syscalls are restarted rather than failing with EINTR.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}
This page took 2.939466 seconds and 4 git commands to generate.