gdbserver: turn target op 'get_tls_address' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
b811d2c2 2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
276d4552 49#include <inttypes.h>
268a13a5 50#include "gdbsupport/common-inferior.h"
2090129c 51#include "nat/fork-inferior.h"
268a13a5 52#include "gdbsupport/environ.h"
21987b9c 53#include "gdbsupport/gdb-sigmask.h"
268a13a5 54#include "gdbsupport/scoped_restore.h"
957f3f49
DE
55#ifndef ELFMAG0
56/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
57 then ELFMAG0 will have been defined. If it didn't get included by
58 gdb_proc_service.h then including it will likely introduce a duplicate
59 definition of elf_fpregset_t. */
60#include <elf.h>
61#endif
14d2069a 62#include "nat/linux-namespaces.h"
efcbbd14 63
03583c20
UW
64#ifdef HAVE_PERSONALITY
65# include <sys/personality.h>
66# if !HAVE_DECL_ADDR_NO_RANDOMIZE
67# define ADDR_NO_RANDOMIZE 0x0040000
68# endif
69#endif
70
fd462a61
DJ
71#ifndef O_LARGEFILE
72#define O_LARGEFILE 0
73#endif
1a981360 74
69f4c9cc
AH
75#ifndef AT_HWCAP2
76#define AT_HWCAP2 26
77#endif
78
db0dfaa0
LM
79/* Some targets did not define these ptrace constants from the start,
80 so gdbserver defines them locally here. In the future, these may
81 be removed after they are added to asm/ptrace.h. */
82#if !(defined(PT_TEXT_ADDR) \
83 || defined(PT_DATA_ADDR) \
84 || defined(PT_TEXT_END_ADDR))
85#if defined(__mcoldfire__)
86/* These are still undefined in 3.10 kernels. */
87#define PT_TEXT_ADDR 49*4
88#define PT_DATA_ADDR 50*4
89#define PT_TEXT_END_ADDR 51*4
90/* BFIN already defines these since at least 2.6.32 kernels. */
91#elif defined(BFIN)
92#define PT_TEXT_ADDR 220
93#define PT_TEXT_END_ADDR 224
94#define PT_DATA_ADDR 228
95/* These are still undefined in 3.10 kernels. */
96#elif defined(__TMS320C6X__)
97#define PT_TEXT_ADDR (0x10000*4)
98#define PT_DATA_ADDR (0x10004*4)
99#define PT_TEXT_END_ADDR (0x10008*4)
100#endif
101#endif
102
5203ae1e
TBA
103#if (defined(__UCLIBC__) \
104 && defined(HAS_NOMMU) \
105 && defined(PT_TEXT_ADDR) \
106 && defined(PT_DATA_ADDR) \
107 && defined(PT_TEXT_END_ADDR))
108#define SUPPORTS_READ_OFFSETS
109#endif
110
9accd112 111#ifdef HAVE_LINUX_BTRACE
125f8a3d 112# include "nat/linux-btrace.h"
268a13a5 113# include "gdbsupport/btrace-common.h"
9accd112
MM
114#endif
115
8365dcf5
TJB
116#ifndef HAVE_ELF32_AUXV_T
117/* Copied from glibc's elf.h. */
118typedef struct
119{
120 uint32_t a_type; /* Entry type */
121 union
122 {
123 uint32_t a_val; /* Integer value */
124 /* We use to have pointer elements added here. We cannot do that,
125 though, since it does not work when using 32-bit definitions
126 on 64-bit platforms and vice versa. */
127 } a_un;
128} Elf32_auxv_t;
129#endif
130
131#ifndef HAVE_ELF64_AUXV_T
132/* Copied from glibc's elf.h. */
133typedef struct
134{
135 uint64_t a_type; /* Entry type */
136 union
137 {
138 uint64_t a_val; /* Integer value */
139 /* We use to have pointer elements added here. We cannot do that,
140 though, since it does not work when using 32-bit definitions
141 on 64-bit platforms and vice versa. */
142 } a_un;
143} Elf64_auxv_t;
144#endif
145
ded48a5e
YQ
146/* Does the current host support PTRACE_GETREGSET? */
147int have_ptrace_getregset = -1;
148
cff068da
GB
149/* LWP accessors. */
150
151/* See nat/linux-nat.h. */
152
153ptid_t
154ptid_of_lwp (struct lwp_info *lwp)
155{
156 return ptid_of (get_lwp_thread (lwp));
157}
158
159/* See nat/linux-nat.h. */
160
4b134ca1
GB
void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Stash the backend's per-LWP data; the low target owns INFO.  */
  lwp->arch_private = info;
}
167
168/* See nat/linux-nat.h. */
169
struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* May be NULL if the low target never allocated per-LWP data.  */
  return lwp->arch_private;
}
175
176/* See nat/linux-nat.h. */
177
cff068da
GB
int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Non-zero iff the LWP is known to be stopped under ptrace.  */
  return lwp->stopped;
}
183
184/* See nat/linux-nat.h. */
185
enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Why the LWP last stopped (breakpoint, watchpoint, etc.), as
     recorded by save_stop_reason.  */
  return lwp->stop_reason;
}
191
0e00e962
AA
192/* See nat/linux-nat.h. */
193
int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Non-zero iff the LWP was last resumed with a single-step request.  */
  return lwp->stepping;
}
199
05044653
PA
200/* A list of all unknown processes which receive stop signals. Some
201 other process will presumably claim each of these as forked
202 children momentarily. */
24a09b5f 203
05044653
PA
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stop events not yet claimed by any known
   process (see the comment above this struct).  */
struct simple_pid_list *stopped_pids;
216
217/* Trivial list manipulation functions to keep track of a list of new
218 stopped processes. */
219
220static void
221add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
222{
8d749320 223 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
224
225 new_pid->pid = pid;
226 new_pid->status = status;
227 new_pid->next = *listp;
228 *listp = new_pid;
229}
230
231static int
232pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
233{
234 struct simple_pid_list **p;
235
236 for (p = listp; *p != NULL; p = &(*p)->next)
237 if ((*p)->pid == pid)
238 {
239 struct simple_pid_list *next = (*p)->next;
240
241 *statusp = (*p)->status;
242 xfree (*p);
243 *p = next;
244 return 1;
245 }
246 return 0;
247}
24a09b5f 248
bde24c0a
PA
249enum stopping_threads_kind
250 {
251 /* Not stopping threads presently. */
252 NOT_STOPPING_THREADS,
253
254 /* Stopping threads. */
255 STOPPING_THREADS,
256
257 /* Stopping and suspending threads. */
258 STOPPING_AND_SUSPENDING_THREADS
259 };
260
261/* This is set while stop_all_lwps is in effect. */
262enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
263
264/* FIXME make into a target method? */
24a09b5f 265int using_threads = 1;
24a09b5f 266
fa593d66
PA
267/* True if we're presently stabilizing threads (moving them out of
268 jump pads). */
269static int stabilizing_threads;
270
2acc282a 271static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 272 int step, int signal, siginfo_t *info);
7984d532
PA
273static void stop_all_lwps (int suspend, struct lwp_info *except);
274static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
f50bf8e5 275static void unsuspend_all_lwps (struct lwp_info *except);
fa96cb38
PA
276static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
277 int *wstat, int options);
95954743 278static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 279static struct lwp_info *add_lwp (ptid_t ptid);
95954743 280static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 281static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 282static void proceed_all_lwps (void);
d50171e4 283static int finish_step_over (struct lwp_info *lwp);
d50171e4 284static int kill_lwp (unsigned long lwpid, int signo);
863d01bd
PA
285static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
286static void complete_ongoing_step_over (void);
ece66d65 287static int linux_low_ptrace_options (int attached);
ced2dffb 288static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
e2b44075 289static void proceed_one_lwp (thread_info *thread, lwp_info *except);
d50171e4 290
582511be
PA
291/* When the event-loop is doing a step-over, this points at the thread
292 being stepped. */
293ptid_t step_over_bkpt;
294
7d00775e 295/* True if the low target can hardware single-step. */
d50171e4
PA
296
297static int
298can_hardware_single_step (void)
299{
7d00775e
AT
300 if (the_low_target.supports_hardware_single_step != NULL)
301 return the_low_target.supports_hardware_single_step ();
302 else
303 return 0;
304}
305
306/* True if the low target can software single-step. Such targets
fa5308bd 307 implement the GET_NEXT_PCS callback. */
7d00775e
AT
308
309static int
310can_software_single_step (void)
311{
fa5308bd 312 return (the_low_target.get_next_pcs != NULL);
d50171e4
PA
313}
314
315/* True if the low target supports memory breakpoints. If so, we'll
316 have a GET_PC implementation. */
317
318static int
319supports_breakpoints (void)
320{
321 return (the_low_target.get_pc != NULL);
322}
0d62e5e8 323
fa593d66
PA
324/* Returns true if this target can support fast tracepoints. This
325 does not mean that the in-process agent has been loaded in the
326 inferior. */
327
328static int
329supports_fast_tracepoints (void)
330{
331 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
332}
333
c2d6af84
PA
334/* True if LWP is stopped in its stepping range. */
335
336static int
337lwp_in_step_range (struct lwp_info *lwp)
338{
339 CORE_ADDR pc = lwp->stop_pc;
340
341 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
342}
343
0d62e5e8
DJ
/* A signal intercepted from the inferior but not yet redelivered,
   kept on a per-LWP chain.  */
struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* The full siginfo that came with the signal.  */
  siginfo_t info;
  /* Link to the previously queued pending signal, if any.  */
  struct pending_signals *prev;
};
611cb4a5 350
bd99dc85
PA
351/* The read/write ends of the pipe registered as waitable file in the
352 event loop. */
353static int linux_event_pipe[2] = { -1, -1 };
354
355/* True if we're currently in async mode. */
356#define target_is_async_p() (linux_event_pipe[0] != -1)
357
02fc4de7 358static void send_sigstop (struct lwp_info *lwp);
fa96cb38 359static void wait_for_sigstop (void);
bd99dc85 360
d0722149
DE
361/* Return non-zero if HEADER is a 64-bit ELF file. */
362
static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Check the \177ELF magic a byte at a time; bail out early when this
     is not an ELF image at all.  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  /* Valid ELF: report the machine and whether the class is 64-bit.  */
  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
378
379/* Return non-zero if FILE is a 64-bit ELF file,
380 zero if the file is not a 64-bit ELF file,
381 and -1 if the file is not accessible or doesn't exist. */
382
be07f1a2 383static int
214d508e 384elf_64_file_p (const char *file, unsigned int *machine)
d0722149 385{
957f3f49 386 Elf64_Ehdr header;
d0722149
DE
387 int fd;
388
389 fd = open (file, O_RDONLY);
390 if (fd < 0)
391 return -1;
392
393 if (read (fd, &header, sizeof (header)) != sizeof (header))
394 {
395 close (fd);
396 return 0;
397 }
398 close (fd);
399
214d508e 400 return elf_64_header_p (&header, machine);
d0722149
DE
401}
402
be07f1a2
PA
403/* Accepts an integer PID; Returns true if the executable PID is
404 running is a 64-bit ELF file.. */
405
406int
214d508e 407linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 408{
d8d2a3ee 409 char file[PATH_MAX];
be07f1a2
PA
410
411 sprintf (file, "/proc/%d/exe", pid);
214d508e 412 return elf_64_file_p (file, machine);
be07f1a2
PA
413}
414
bd99dc85
PA
/* Remove LWP (and its thread_info) from the server's data structures
   and release its storage.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  /* Unlink the thread from the common thread list before freeing the
     LWP it points at.  */
  remove_thread (thr);

  /* Let the low target free LWP->arch_private; targets without a
     delete_thread hook must never have allocated it.  */
  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}
432
95954743
PA
433/* Add a process to the common process list, and set its private
434 data. */
435
static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  /* XCNEW zero-initializes the private data.  */
  proc->priv = XCNEW (struct process_info_private);

  /* Give the low target a chance to allocate its per-process data.  */
  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}
449
582511be
PA
450static CORE_ADDR get_pc (struct lwp_info *lwp);
451
ece66d65 452/* Call the target arch_setup function on the current thread. */
94585166
DB
453
static void
linux_arch_setup (void)
{
  /* Delegate to the low target's arch_setup hook, which operates on
     the current thread.  */
  the_low_target.arch_setup ();
}
459
460/* Call the target arch_setup function on THREAD. */
461
462static void
463linux_arch_setup_thread (struct thread_info *thread)
464{
465 struct thread_info *saved_thread;
466
467 saved_thread = current_thread;
468 current_thread = thread;
469
470 linux_arch_setup ();
471
472 current_thread = saved_thread;
473}
474
475/* Handle a GNU/Linux extended wait response. If we see a clone,
476 fork, or vfork event, we need to add the new LWP to our list
477 (and return 0 so as not to report the trap to higher layers).
478 If we see an exec event, we will modify ORIG_EVENT_LWP to point
479 to a new LWP representing the new program. */
0d62e5e8 480
static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  /* Return convention: 0 means "report the event to higher layers",
     1 means "swallow it here".  */
  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      /* Fork/vfork creates a whole new process; the clone case (new
	 thread in the same process) is handled further below.  */
      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  /* The child gets its own copy of the parent's target
	     description.  */
	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      /* Otherwise this is a clone: a new LWP in the parent's
	 process.  */
      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Re-insert the single-step breakpoints that were removed from
	 the parent for the duration of the vfork.  */
      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      the_target->pt->mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
752
d50171e4
PA
753/* Return the PC as read from the regcache of LWP, without any
754 adjustment. */
755
756static CORE_ADDR
757get_pc (struct lwp_info *lwp)
758{
0bfdf32f 759 struct thread_info *saved_thread;
d50171e4
PA
760 struct regcache *regcache;
761 CORE_ADDR pc;
762
763 if (the_low_target.get_pc == NULL)
764 return 0;
765
0bfdf32f
GB
766 saved_thread = current_thread;
767 current_thread = get_lwp_thread (lwp);
d50171e4 768
0bfdf32f 769 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
770 pc = (*the_low_target.get_pc) (regcache);
771
772 if (debug_threads)
87ce2a04 773 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 774
0bfdf32f 775 current_thread = saved_thread;
d50171e4
PA
776 return pc;
777}
778
82075af2 779/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
4cc32bec 780 Fill *SYSNO with the syscall nr trapped. */
82075af2
JS
781
782static void
4cc32bec 783get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
82075af2
JS
784{
785 struct thread_info *saved_thread;
786 struct regcache *regcache;
787
788 if (the_low_target.get_syscall_trapinfo == NULL)
789 {
790 /* If we cannot get the syscall trapinfo, report an unknown
4cc32bec 791 system call number. */
82075af2 792 *sysno = UNKNOWN_SYSCALL;
82075af2
JS
793 return;
794 }
795
796 saved_thread = current_thread;
797 current_thread = get_lwp_thread (lwp);
798
799 regcache = get_thread_regcache (current_thread, 1);
4cc32bec 800 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
82075af2
JS
801
802 if (debug_threads)
4cc32bec 803 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
804
805 current_thread = saved_thread;
806}
807
e7ad2f14 808static int check_stopped_by_watchpoint (struct lwp_info *child);
0d62e5e8 809
e7ad2f14
PA
810/* Called when the LWP stopped for a signal/trap. If it stopped for a
811 trap check what caused it (breakpoint, watchpoint, trace, etc.),
812 and save the result in the LWP's stop_reason field. If it stopped
813 for a breakpoint, decrement the PC if necessary on the lwp's
814 architecture. Returns true if we now have the LWP's stop PC. */
static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  /* Without a get_pc hook there is nothing we can determine here.  */
  if (the_low_target.get_pc == NULL)
    return 0;

  /* A software breakpoint leaves the PC just past the breakpoint
     instruction on some architectures; compute the address the
     breakpoint would have been at.  */
  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: let the kernel tell us why we trapped via
     siginfo's si_code.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* Fallback path: no usable siginfo, so infer the reason from what
     is inserted at/near the PC.  We may have just stepped a
     breakpoint instruction.  E.g., in non-stop mode, GDB first tells
     the thread A to step a range, and then the user inserts a
     breakpoint inside the range.  In that case we need to report the
     breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  /* Record the (possibly adjusted) stop PC.  */
  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}
ce3a066d 950
/* Allocate a new LWP for PTID, register it in the common thread list,
   and return it.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  /* XCNEW zeroes the structure, so all flags start out clear.  */
  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  /* Create the thread_info for this LWP and link the two together.  */
  lwp->thread = add_thread (ptid, lwp);

  /* Let the low target allocate its per-thread data, if any.  */
  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}
611cb4a5 967
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, before exec: any failure must abort via
   trace_start_error_with_name, not return.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group, so ^C aimed at the
     inferior doesn't hit gdbserver.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
999
da6d8c04 1000/* Start an inferior process and returns its pid.
2090129c
SDJ
1001 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
1002 are its arguments. */
da6d8c04 1003
15295543
TBA
int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Temporarily disable ASLR if the client asked for it; the
       destructor of restore_personality restores the old setting
       when this scope exits.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* ATTACHED = 0: we created this process ourselves.  */
  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* The ptrace options can only be set once the child has stopped;
     post_create_inferior does that.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
1034
ece66d65
JS
1035/* Implement the post_create_inferior target_ops method. */
1036
6dee9afb
TBA
/* Implement the post_create_inferior target_ops method.  Called once
   the new inferior has reported its initial stop: determine the
   architecture and enable the ptrace event options we rely on.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Read the target description / registers layout now that the
     inferior exists and is stopped.  */
  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1053
8784d563
PA
1054/* Attach to an inferior process. Returns 0 on success, ERRNO on
1055 error. */
da6d8c04 1056
7ae1a6a6
PA
int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1138
8784d563
PA
1139/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1140 already attached. Returns true if a new LWP is found, false
1141 otherwise. */
1142
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise (the caller uses this to decide whether to rescan
   /proc/PID/task for yet more threads).  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  /* Attach failed for some other reason: warn but keep
	     scanning the remaining threads.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1183
500c1d85
PA
1184static void async_file_mark (void);
1185
e3deef73
LM
1186/* Attach to PID. If PID is the tgid, attach to it and all
1187 of its threads. */
1188
ef03dad8
TBA
1189int
1190linux_process_target::attach (unsigned long pid)
0d62e5e8 1191{
500c1d85
PA
1192 struct process_info *proc;
1193 struct thread_info *initial_thread;
fd79271b 1194 ptid_t ptid = ptid_t (pid, pid, 0);
7ae1a6a6
PA
1195 int err;
1196
df0da8a2
AH
1197 proc = linux_add_process (pid, 1);
1198
e3deef73
LM
1199 /* Attach to PID. We will check for other threads
1200 soon. */
7ae1a6a6
PA
1201 err = linux_attach_lwp (ptid);
1202 if (err != 0)
4d9b86e1 1203 {
df0da8a2 1204 remove_process (proc);
4d9b86e1 1205
50fa3001
SDJ
1206 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1207 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
4d9b86e1 1208 }
7ae1a6a6 1209
500c1d85
PA
1210 /* Don't ignore the initial SIGSTOP if we just attached to this
1211 process. It will be collected by wait shortly. */
fd79271b 1212 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
500c1d85 1213 initial_thread->last_resume_kind = resume_stop;
0d62e5e8 1214
8784d563
PA
1215 /* We must attach to every LWP. If /proc is mounted, use that to
1216 find them now. On the one hand, the inferior may be using raw
1217 clone instead of using pthreads. On the other hand, even if it
1218 is using pthreads, GDB may not be connected yet (thread_db needs
1219 to do symbol lookups, through qSymbol). Also, thread_db walks
1220 structures in the inferior's address space to find the list of
1221 threads/LWPs, and those structures may well be corrupted. Note
1222 that once thread_db is loaded, we'll still use it to list threads
1223 and associate pthread info with each LWP. */
1224 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
500c1d85
PA
1225
1226 /* GDB will shortly read the xml target description for this
1227 process, to figure out the process' architecture. But the target
1228 description is only filled in when the first process/thread in
1229 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1230 that now, otherwise, if GDB is fast enough, it could read the
1231 target description _before_ that initial stop. */
1232 if (non_stop)
1233 {
1234 struct lwp_info *lwp;
1235 int wstat, lwpid;
f2907e49 1236 ptid_t pid_ptid = ptid_t (pid);
500c1d85
PA
1237
1238 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1239 &wstat, __WALL);
1240 gdb_assert (lwpid > 0);
1241
f2907e49 1242 lwp = find_lwp_pid (ptid_t (lwpid));
500c1d85
PA
1243
1244 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1245 {
1246 lwp->status_pending_p = 1;
1247 lwp->status_pending = wstat;
1248 }
1249
1250 initial_thread->last_resume_kind = resume_continue;
1251
1252 async_file_mark ();
1253
1254 gdb_assert (proc->tdesc != NULL);
1255 }
1256
95954743
PA
1257 return 0;
1258}
1259
95954743 1260static int
e4eb0dec 1261last_thread_of_process_p (int pid)
95954743 1262{
e4eb0dec 1263 bool seen_one = false;
95954743 1264
da4ae14a 1265 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1266 {
e4eb0dec
SM
1267 if (!seen_one)
1268 {
1269 /* This is the first thread of this process we see. */
1270 seen_one = true;
1271 return false;
1272 }
1273 else
1274 {
1275 /* This is the second thread of this process we see. */
1276 return true;
1277 }
1278 });
da6d8c04 1279
e4eb0dec 1280 return thread == NULL;
95954743
PA
1281}
1282
da84f473
PA
1283/* Kill LWP. */
1284
/* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL as a belt-and-braces
   fallback; see the comment below for why both.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1326
e76126e8
PA
1327/* Kill LWP and wait for it to die. */
1328
/* Kill LWP and wait for it to die, reaping all intermediate stop
   statuses so the kernel-side entry is fully collected.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1369
578290ec 1370/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1371 except the leader. */
95954743 1372
578290ec
SM
/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader (the lwp whose id equals PID), which the caller
   must kill last.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}
1393
c6885a57
TBA
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill every non-leader lwp first; see kill_one_lwp_callback for
     why the leader must go last.  */
  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1428
9b224c5e
PA
1429/* Get pending signal of THREAD, for detaching purposes. This is the
1430 signal the thread last stopped for, which we need to deliver to the
1431 thread when detaching, otherwise, it'd be suppressed/lost. */
1432
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 (no signal) when the signal should not be passed on.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal table when we have it.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1509
ced2dffb
PA
1510/* Detach from LWP. */
1511
/* Detach from LWP: cancel any pending SIGSTOP, deliver the lwp's last
   intercepted signal (if GDB would pass it), and PTRACE_DETACH.
   Handles the lwp having become a zombie underneath us.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1594
798a38e8 1595/* Callback for for_each_thread. Detaches from non-leader threads of a
ced2dffb
PA
1596 given process. */
1597
798a38e8
SM
1598static void
1599linux_detach_lwp_callback (thread_info *thread)
ced2dffb 1600{
ced2dffb
PA
1601 /* We don't actually detach from the thread group leader just yet.
1602 If the thread group exits, we must reap the zombie clone lwps
1603 before we're able to reap the leader. */
798a38e8
SM
1604 if (thread->id.pid () == thread->id.lwp ())
1605 return;
ced2dffb 1606
798a38e8 1607 lwp_info *lwp = get_thread_lwp (thread);
ced2dffb 1608 linux_detach_one_lwp (lwp);
6ad8ae5c
DJ
1609}
1610
9061c9cf
TBA
int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1649
1650/* Remove all LWPs that belong to process PROC from the lwp list. */
1651
8adb37b9
TBA
/* Remove all LWPs that belong to process PROC from the lwp list, and
   release the process's private data and its entry in the process
   list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  /* The low target owns arch_private; if it provides no deleter, it
     must never have allocated one.  */
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1677
95a49a39
TBA
1678void
1679linux_process_target::join (int pid)
444d6139 1680{
444d6139
PA
1681 int status, ret;
1682
1683 do {
d105de22 1684 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1685 if (WIFEXITED (status) || WIFSIGNALED (status))
1686 break;
1687 } while (ret != -1 || errno != ECHILD);
1688}
1689
13d3d99b
TBA
1690/* Return true if the given thread is still alive. */
1691
1692bool
1693linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1694{
95954743
PA
1695 struct lwp_info *lwp = find_lwp_pid (ptid);
1696
1697 /* We assume we always know if a thread exits. If a whole process
1698 exited but we still haven't been able to report it to GDB, we'll
1699 hold on to the last lwp of the dead process. */
1700 if (lwp != NULL)
00db26fa 1701 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1702 else
1703 return 0;
1704}
1705
582511be
PA
1706/* Return 1 if this lwp still has an interesting status pending. If
1707 not (e.g., it had stopped for a breakpoint that is gone), return
1708 false. */
1709
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  May clear the lwp's status_pending_p as a side effect.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* get_pc and the breakpoint checks below operate on
	 current_thread; temporarily switch to THREAD.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons we must re-check that the
	 breakpoint that caused the stop is still inserted.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1773
a681f9c9
PA
1774/* Returns true if LWP is resumed from the client's perspective. */
1775
1776static int
1777lwp_resumed (struct lwp_info *lwp)
1778{
1779 struct thread_info *thread = get_lwp_thread (lwp);
1780
1781 if (thread->last_resume_kind != resume_stop)
1782 return 1;
1783
1784 /* Did gdb send us a `vCont;t', but we haven't reported the
1785 corresponding stop to gdb yet? If so, the thread is still
1786 resumed/running from gdb's perspective. */
1787 if (thread->last_resume_kind == resume_stop
1788 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1789 return 1;
1790
1791 return 0;
1792}
1793
83e1b6c1
SM
1794/* Return true if this lwp has an interesting status pending. */
/* Return true if this lwp has an interesting status pending.  May
   resume the lwp (and clear the pending flag) if its previously
   recorded stop is no longer interesting.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The stop became uninteresting (e.g., breakpoint removed);
	 quietly re-resume the lwp the way it was going.  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1817
95954743
PA
1818struct lwp_info *
1819find_lwp_pid (ptid_t ptid)
1820{
da4ae14a 1821 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1822 {
1823 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1824 return thr_arg->id.lwp () == lwp;
454296a2 1825 });
d86d4aaf
DE
1826
1827 if (thread == NULL)
1828 return NULL;
1829
9c80ecd6 1830 return get_thread_lwp (thread);
95954743
PA
1831}
1832
fa96cb38 1833/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1834
fa96cb38
PA
1835static int
1836num_lwps (int pid)
1837{
fa96cb38 1838 int count = 0;
0d62e5e8 1839
4d3bb80e
SM
1840 for_each_thread (pid, [&] (thread_info *thread)
1841 {
9c80ecd6 1842 count++;
4d3bb80e 1843 });
3aee8918 1844
fa96cb38
PA
1845 return count;
1846}
d61ddec4 1847
6d4ee8c6
GB
1848/* See nat/linux-nat.h. */
1849
/* See nat/linux-nat.h.  Apply CALLBACK to each lwp matching FILTER;
   return the first lwp for which CALLBACK returns true, or NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
1866
fa96cb38
PA
1867/* Detect zombie thread group leaders, and "exit" them. We can't reap
1868 their exits until all other threads in the group have exited. */
c3adc08c 1869
fa96cb38
PA
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     its tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}
c3adc08c 1928
a1385b7b
SM
1929/* Callback for `find_thread'. Returns the first LWP that is not
1930 stopped. */
d50171e4 1931
a1385b7b
SM
1932static bool
1933not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1934{
a1385b7b
SM
1935 if (!thread->id.matches (filter))
1936 return false;
47c0c975 1937
a1385b7b 1938 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1939
a1385b7b 1940 return !lwp->stopped;
0d62e5e8 1941}
611cb4a5 1942
863d01bd
PA
1943/* Increment LWP's suspend count. */
1944
1945static void
1946lwp_suspended_inc (struct lwp_info *lwp)
1947{
1948 lwp->suspended++;
1949
1950 if (debug_threads && lwp->suspended > 4)
1951 {
1952 struct thread_info *thread = get_lwp_thread (lwp);
1953
1954 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1955 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1956 }
1957}
1958
1959/* Decrement LWP's suspend count. */
1960
1961static void
1962lwp_suspended_decr (struct lwp_info *lwp)
1963{
1964 lwp->suspended--;
1965
1966 if (lwp->suspended < 0)
1967 {
1968 struct thread_info *thread = get_lwp_thread (lwp);
1969
1970 internal_error (__FILE__, __LINE__,
1971 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1972 lwp->suspended);
1973 }
1974}
1975
219f2f23
PA
1976/* This function should only be called if the LWP got a SIGTRAP.
1977
1978 Handle any tracepoint steps or hits. Return true if a tracepoint
1979 event was handled, 0 otherwise. */
1980
1981static int
1982handle_tracepoints (struct lwp_info *lwp)
1983{
1984 struct thread_info *tinfo = get_lwp_thread (lwp);
1985 int tpoint_related_event = 0;
1986
582511be
PA
1987 gdb_assert (lwp->suspended == 0);
1988
7984d532
PA
1989 /* If this tracepoint hit causes a tracing stop, we'll immediately
1990 uninsert tracepoints. To do this, we temporarily pause all
1991 threads, unpatch away, and then unpause threads. We need to make
1992 sure the unpausing doesn't resume LWP too. */
863d01bd 1993 lwp_suspended_inc (lwp);
7984d532 1994
219f2f23
PA
1995 /* And we need to be sure that any all-threads-stopping doesn't try
1996 to move threads out of the jump pads, as it could deadlock the
1997 inferior (LWP could be in the jump pad, maybe even holding the
1998 lock.) */
1999
2000 /* Do any necessary step collect actions. */
2001 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2002
fa593d66
PA
2003 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2004
219f2f23
PA
2005 /* See if we just hit a tracepoint and do its main collect
2006 actions. */
2007 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2008
863d01bd 2009 lwp_suspended_decr (lwp);
7984d532
PA
2010
2011 gdb_assert (lwp->suspended == 0);
229d26fc
SM
2012 gdb_assert (!stabilizing_threads
2013 || (lwp->collecting_fast_tracepoint
2014 != fast_tpoint_collect_result::not_collecting));
7984d532 2015
219f2f23
PA
2016 if (tpoint_related_event)
2017 {
2018 if (debug_threads)
87ce2a04 2019 debug_printf ("got a tracepoint event\n");
219f2f23
PA
2020 return 1;
2021 }
2022
2023 return 0;
2024}
2025
229d26fc
SM
2026/* Convenience wrapper. Returns information about LWP's fast tracepoint
2027 collection status. */
fa593d66 2028
229d26fc 2029static fast_tpoint_collect_result
fa593d66
PA
2030linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2031 struct fast_tpoint_collect_status *status)
2032{
2033 CORE_ADDR thread_area;
d86d4aaf 2034 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2035
2036 if (the_low_target.get_thread_area == NULL)
229d26fc 2037 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2038
2039 /* Get the thread area address. This is used to recognize which
2040 thread is which when tracing with the in-process agent library.
2041 We don't read anything from the address, and treat it as opaque;
2042 it's the address itself that we assume is unique per-thread. */
d86d4aaf 2043 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
229d26fc 2044 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2045
2046 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2047}
2048
2049/* The reason we resume in the caller, is because we want to be able
2050 to pass lwp->status_pending as WSTAT, and we need to clear
2051 status_pending_p before resuming, otherwise, linux_resume_one_lwp
2052 refuses to resume. */
2053
2054static int
2055maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2056{
0bfdf32f 2057 struct thread_info *saved_thread;
fa593d66 2058
0bfdf32f
GB
2059 saved_thread = current_thread;
2060 current_thread = get_lwp_thread (lwp);
fa593d66
PA
2061
2062 if ((wstat == NULL
2063 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2064 && supports_fast_tracepoints ()
58b4daa5 2065 && agent_loaded_p ())
fa593d66
PA
2066 {
2067 struct fast_tpoint_collect_status status;
fa593d66
PA
2068
2069 if (debug_threads)
87ce2a04
DE
2070 debug_printf ("Checking whether LWP %ld needs to move out of the "
2071 "jump pad.\n",
0bfdf32f 2072 lwpid_of (current_thread));
fa593d66 2073
229d26fc
SM
2074 fast_tpoint_collect_result r
2075 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
2076
2077 if (wstat == NULL
2078 || (WSTOPSIG (*wstat) != SIGILL
2079 && WSTOPSIG (*wstat) != SIGFPE
2080 && WSTOPSIG (*wstat) != SIGSEGV
2081 && WSTOPSIG (*wstat) != SIGBUS))
2082 {
2083 lwp->collecting_fast_tracepoint = r;
2084
229d26fc 2085 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2086 {
229d26fc
SM
2087 if (r == fast_tpoint_collect_result::before_insn
2088 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2089 {
2090 /* Haven't executed the original instruction yet.
2091 Set breakpoint there, and wait till it's hit,
2092 then single-step until exiting the jump pad. */
2093 lwp->exit_jump_pad_bkpt
2094 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2095 }
2096
2097 if (debug_threads)
87ce2a04
DE
2098 debug_printf ("Checking whether LWP %ld needs to move out of "
2099 "the jump pad...it does\n",
0bfdf32f
GB
2100 lwpid_of (current_thread));
2101 current_thread = saved_thread;
fa593d66
PA
2102
2103 return 1;
2104 }
2105 }
2106 else
2107 {
2108 /* If we get a synchronous signal while collecting, *and*
2109 while executing the (relocated) original instruction,
2110 reset the PC to point at the tpoint address, before
2111 reporting to GDB. Otherwise, it's an IPA lib bug: just
2112 report the signal to GDB, and pray for the best. */
2113
229d26fc
SM
2114 lwp->collecting_fast_tracepoint
2115 = fast_tpoint_collect_result::not_collecting;
fa593d66 2116
229d26fc 2117 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2118 && (status.adjusted_insn_addr <= lwp->stop_pc
2119 && lwp->stop_pc < status.adjusted_insn_addr_end))
2120 {
2121 siginfo_t info;
2122 struct regcache *regcache;
2123
2124 /* The si_addr on a few signals references the address
2125 of the faulting instruction. Adjust that as
2126 well. */
2127 if ((WSTOPSIG (*wstat) == SIGILL
2128 || WSTOPSIG (*wstat) == SIGFPE
2129 || WSTOPSIG (*wstat) == SIGBUS
2130 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2131 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2132 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2133 /* Final check just to make sure we don't clobber
2134 the siginfo of non-kernel-sent signals. */
2135 && (uintptr_t) info.si_addr == lwp->stop_pc)
2136 {
2137 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2138 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2139 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2140 }
2141
0bfdf32f 2142 regcache = get_thread_regcache (current_thread, 1);
fa593d66
PA
2143 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2144 lwp->stop_pc = status.tpoint_addr;
2145
2146 /* Cancel any fast tracepoint lock this thread was
2147 holding. */
2148 force_unlock_trace_buffer ();
2149 }
2150
2151 if (lwp->exit_jump_pad_bkpt != NULL)
2152 {
2153 if (debug_threads)
87ce2a04
DE
2154 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2155 "stopping all threads momentarily.\n");
fa593d66
PA
2156
2157 stop_all_lwps (1, lwp);
fa593d66
PA
2158
2159 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2160 lwp->exit_jump_pad_bkpt = NULL;
2161
2162 unstop_all_lwps (1, lwp);
2163
2164 gdb_assert (lwp->suspended >= 0);
2165 }
2166 }
2167 }
2168
2169 if (debug_threads)
87ce2a04
DE
2170 debug_printf ("Checking whether LWP %ld needs to move out of the "
2171 "jump pad...no\n",
0bfdf32f 2172 lwpid_of (current_thread));
0cccb683 2173
0bfdf32f 2174 current_thread = saved_thread;
fa593d66
PA
2175 return 0;
2176}
2177
2178/* Enqueue one signal in the "signals to report later when out of the
2179 jump pad" list. */
2180
2181static void
2182enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2183{
2184 struct pending_signals *p_sig;
d86d4aaf 2185 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2186
2187 if (debug_threads)
87ce2a04 2188 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2189 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2190
2191 if (debug_threads)
2192 {
2193 struct pending_signals *sig;
2194
2195 for (sig = lwp->pending_signals_to_report;
2196 sig != NULL;
2197 sig = sig->prev)
87ce2a04
DE
2198 debug_printf (" Already queued %d\n",
2199 sig->signal);
fa593d66 2200
87ce2a04 2201 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2202 }
2203
1a981360
PA
2204 /* Don't enqueue non-RT signals if they are already in the deferred
2205 queue. (SIGSTOP being the easiest signal to see ending up here
2206 twice) */
2207 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2208 {
2209 struct pending_signals *sig;
2210
2211 for (sig = lwp->pending_signals_to_report;
2212 sig != NULL;
2213 sig = sig->prev)
2214 {
2215 if (sig->signal == WSTOPSIG (*wstat))
2216 {
2217 if (debug_threads)
87ce2a04
DE
2218 debug_printf ("Not requeuing already queued non-RT signal %d"
2219 " for LWP %ld\n",
2220 sig->signal,
d86d4aaf 2221 lwpid_of (thread));
1a981360
PA
2222 return;
2223 }
2224 }
2225 }
2226
8d749320 2227 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2228 p_sig->prev = lwp->pending_signals_to_report;
2229 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2230
d86d4aaf 2231 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2232 &p_sig->info);
fa593d66
PA
2233
2234 lwp->pending_signals_to_report = p_sig;
2235}
2236
2237/* Dequeue one signal from the "signals to report later when out of
2238 the jump pad" list. */
2239
2240static int
2241dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2242{
d86d4aaf
DE
2243 struct thread_info *thread = get_lwp_thread (lwp);
2244
fa593d66
PA
2245 if (lwp->pending_signals_to_report != NULL)
2246 {
2247 struct pending_signals **p_sig;
2248
2249 p_sig = &lwp->pending_signals_to_report;
2250 while ((*p_sig)->prev != NULL)
2251 p_sig = &(*p_sig)->prev;
2252
2253 *wstat = W_STOPCODE ((*p_sig)->signal);
2254 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2255 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2256 &(*p_sig)->info);
fa593d66
PA
2257 free (*p_sig);
2258 *p_sig = NULL;
2259
2260 if (debug_threads)
87ce2a04 2261 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2262 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2263
2264 if (debug_threads)
2265 {
2266 struct pending_signals *sig;
2267
2268 for (sig = lwp->pending_signals_to_report;
2269 sig != NULL;
2270 sig = sig->prev)
87ce2a04
DE
2271 debug_printf (" Still queued %d\n",
2272 sig->signal);
fa593d66 2273
87ce2a04 2274 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2275 }
2276
2277 return 1;
2278 }
2279
2280 return 0;
2281}
2282
582511be
PA
2283/* Fetch the possibly triggered data watchpoint info and store it in
2284 CHILD.
d50171e4 2285
582511be
PA
2286 On some archs, like x86, that use debug registers to set
2287 watchpoints, it's possible that the way to know which watched
2288 address trapped, is to check the register that is used to select
2289 which address to watch. Problem is, between setting the watchpoint
2290 and reading back which data address trapped, the user may change
2291 the set of watchpoints, and, as a consequence, GDB changes the
2292 debug registers in the inferior. To avoid reading back a stale
2293 stopped-data-address when that happens, we cache in LP the fact
2294 that a watchpoint trapped, and the corresponding data address, as
2295 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2296 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2297
582511be
PA
2298static int
2299check_stopped_by_watchpoint (struct lwp_info *child)
2300{
2301 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2302 {
582511be 2303 struct thread_info *saved_thread;
d50171e4 2304
582511be
PA
2305 saved_thread = current_thread;
2306 current_thread = get_lwp_thread (child);
2307
2308 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2309 {
15c66dd6 2310 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2311
2312 if (the_low_target.stopped_data_address != NULL)
2313 child->stopped_data_address
2314 = the_low_target.stopped_data_address ();
2315 else
2316 child->stopped_data_address = 0;
d50171e4
PA
2317 }
2318
0bfdf32f 2319 current_thread = saved_thread;
d50171e4
PA
2320 }
2321
15c66dd6 2322 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2323}
2324
de0d863e
DB
2325/* Return the ptrace options that we want to try to enable. */
2326
2327static int
2328linux_low_ptrace_options (int attached)
2329{
c12a5089 2330 client_state &cs = get_client_state ();
de0d863e
DB
2331 int options = 0;
2332
2333 if (!attached)
2334 options |= PTRACE_O_EXITKILL;
2335
c12a5089 2336 if (cs.report_fork_events)
de0d863e
DB
2337 options |= PTRACE_O_TRACEFORK;
2338
c12a5089 2339 if (cs.report_vfork_events)
c269dbdb
DB
2340 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2341
c12a5089 2342 if (cs.report_exec_events)
94585166
DB
2343 options |= PTRACE_O_TRACEEXEC;
2344
82075af2
JS
2345 options |= PTRACE_O_TRACESYSGOOD;
2346
de0d863e
DB
2347 return options;
2348}
2349
fa96cb38
PA
2350/* Do low-level handling of the event, and check if we should go on
2351 and pass it to caller code. Return the affected lwp if we are, or
2352 NULL otherwise. */
2353
2354static struct lwp_info *
582511be 2355linux_low_filter_event (int lwpid, int wstat)
fa96cb38 2356{
c12a5089 2357 client_state &cs = get_client_state ();
fa96cb38
PA
2358 struct lwp_info *child;
2359 struct thread_info *thread;
582511be 2360 int have_stop_pc = 0;
fa96cb38 2361
f2907e49 2362 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2363
94585166
DB
2364 /* Check for stop events reported by a process we didn't already
2365 know about - anything not already in our LWP list.
2366
2367 If we're expecting to receive stopped processes after
2368 fork, vfork, and clone events, then we'll just add the
2369 new one to our list and go back to waiting for the event
2370 to be reported - the stopped process might be returned
2371 from waitpid before or after the event is.
2372
2373 But note the case of a non-leader thread exec'ing after the
2374 leader having exited, and gone from our lists (because
2375 check_zombie_leaders deleted it). The non-leader thread
2376 changes its tid to the tgid. */
2377
2378 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2379 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2380 {
2381 ptid_t child_ptid;
2382
2383 /* A multi-thread exec after we had seen the leader exiting. */
2384 if (debug_threads)
2385 {
2386 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2387 "after exec.\n", lwpid);
2388 }
2389
fd79271b 2390 child_ptid = ptid_t (lwpid, lwpid, 0);
94585166
DB
2391 child = add_lwp (child_ptid);
2392 child->stopped = 1;
2393 current_thread = child->thread;
2394 }
2395
fa96cb38
PA
2396 /* If we didn't find a process, one of two things presumably happened:
2397 - A process we started and then detached from has exited. Ignore it.
2398 - A process we are controlling has forked and the new child's stop
2399 was reported to us by the kernel. Save its PID. */
2400 if (child == NULL && WIFSTOPPED (wstat))
2401 {
2402 add_to_pid_list (&stopped_pids, lwpid, wstat);
2403 return NULL;
2404 }
2405 else if (child == NULL)
2406 return NULL;
2407
2408 thread = get_lwp_thread (child);
2409
2410 child->stopped = 1;
2411
2412 child->last_status = wstat;
2413
582511be
PA
2414 /* Check if the thread has exited. */
2415 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2416 {
2417 if (debug_threads)
2418 debug_printf ("LLFE: %d exited.\n", lwpid);
f50bf8e5
YQ
2419
2420 if (finish_step_over (child))
2421 {
2422 /* Unsuspend all other LWPs, and set them back running again. */
2423 unsuspend_all_lwps (child);
2424 }
2425
65706a29
PA
2426 /* If there is at least one more LWP, then the exit signal was
2427 not the end of the debugged application and should be
2428 ignored, unless GDB wants to hear about thread exits. */
c12a5089 2429 if (cs.report_thread_events
65706a29 2430 || last_thread_of_process_p (pid_of (thread)))
582511be 2431 {
65706a29
PA
2432 /* Since events are serialized to GDB core, and we can't
2433 report this one right now. Leave the status pending for
2434 the next time we're able to report it. */
2435 mark_lwp_dead (child, wstat);
2436 return child;
582511be
PA
2437 }
2438 else
2439 {
65706a29
PA
2440 delete_lwp (child);
2441 return NULL;
582511be
PA
2442 }
2443 }
2444
2445 gdb_assert (WIFSTOPPED (wstat));
2446
fa96cb38
PA
2447 if (WIFSTOPPED (wstat))
2448 {
2449 struct process_info *proc;
2450
c06cbd92 2451 /* Architecture-specific setup after inferior is running. */
fa96cb38 2452 proc = find_process_pid (pid_of (thread));
c06cbd92 2453 if (proc->tdesc == NULL)
fa96cb38 2454 {
c06cbd92
YQ
2455 if (proc->attached)
2456 {
c06cbd92
YQ
2457 /* This needs to happen after we have attached to the
2458 inferior and it is stopped for the first time, but
2459 before we access any inferior registers. */
94585166 2460 linux_arch_setup_thread (thread);
c06cbd92
YQ
2461 }
2462 else
2463 {
2464 /* The process is started, but GDBserver will do
2465 architecture-specific setup after the program stops at
2466 the first instruction. */
2467 child->status_pending_p = 1;
2468 child->status_pending = wstat;
2469 return child;
2470 }
fa96cb38
PA
2471 }
2472 }
2473
fa96cb38
PA
2474 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2475 {
beed38b8 2476 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2477 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2478
de0d863e 2479 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2480 child->must_set_ptrace_flags = 0;
2481 }
2482
82075af2
JS
2483 /* Always update syscall_state, even if it will be filtered later. */
2484 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2485 {
2486 child->syscall_state
2487 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2488 ? TARGET_WAITKIND_SYSCALL_RETURN
2489 : TARGET_WAITKIND_SYSCALL_ENTRY);
2490 }
2491 else
2492 {
2493 /* Almost all other ptrace-stops are known to be outside of system
2494 calls, with further exceptions in handle_extended_wait. */
2495 child->syscall_state = TARGET_WAITKIND_IGNORE;
2496 }
2497
e7ad2f14
PA
2498 /* Be careful to not overwrite stop_pc until save_stop_reason is
2499 called. */
fa96cb38 2500 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2501 && linux_is_extended_waitstatus (wstat))
fa96cb38 2502 {
582511be 2503 child->stop_pc = get_pc (child);
94585166 2504 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2505 {
2506 /* The event has been handled, so just return without
2507 reporting it. */
2508 return NULL;
2509 }
fa96cb38
PA
2510 }
2511
80aea927 2512 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2513 {
e7ad2f14 2514 if (save_stop_reason (child))
582511be
PA
2515 have_stop_pc = 1;
2516 }
2517
2518 if (!have_stop_pc)
2519 child->stop_pc = get_pc (child);
2520
fa96cb38
PA
2521 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2522 && child->stop_expected)
2523 {
2524 if (debug_threads)
2525 debug_printf ("Expected stop.\n");
2526 child->stop_expected = 0;
2527
2528 if (thread->last_resume_kind == resume_stop)
2529 {
2530 /* We want to report the stop to the core. Treat the
2531 SIGSTOP as a normal event. */
2bf6fb9d
PA
2532 if (debug_threads)
2533 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2534 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2535 }
2536 else if (stopping_threads != NOT_STOPPING_THREADS)
2537 {
2538 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2539 pending. */
2bf6fb9d
PA
2540 if (debug_threads)
2541 debug_printf ("LLW: SIGSTOP caught for %s "
2542 "while stopping threads.\n",
2543 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2544 return NULL;
2545 }
2546 else
2547 {
2bf6fb9d
PA
2548 /* This is a delayed SIGSTOP. Filter out the event. */
2549 if (debug_threads)
2550 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2551 child->stepping ? "step" : "continue",
2552 target_pid_to_str (ptid_of (thread)));
2553
fa96cb38
PA
2554 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2555 return NULL;
2556 }
2557 }
2558
582511be
PA
2559 child->status_pending_p = 1;
2560 child->status_pending = wstat;
fa96cb38
PA
2561 return child;
2562}
2563
f79b145d
YQ
2564/* Return true if THREAD is doing hardware single step. */
2565
2566static int
2567maybe_hw_step (struct thread_info *thread)
2568{
2569 if (can_hardware_single_step ())
2570 return 1;
2571 else
2572 {
3b9a79ef 2573 /* GDBserver must insert single-step breakpoint for software
f79b145d 2574 single step. */
3b9a79ef 2575 gdb_assert (has_single_step_breakpoints (thread));
f79b145d
YQ
2576 return 0;
2577 }
2578}
2579
20ba1ce6
PA
2580/* Resume LWPs that are currently stopped without any pending status
2581 to report, but are resumed from the core's perspective. */
2582
2583static void
9c80ecd6 2584resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2585{
20ba1ce6
PA
2586 struct lwp_info *lp = get_thread_lwp (thread);
2587
2588 if (lp->stopped
863d01bd 2589 && !lp->suspended
20ba1ce6 2590 && !lp->status_pending_p
20ba1ce6
PA
2591 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2592 {
8901d193
YQ
2593 int step = 0;
2594
2595 if (thread->last_resume_kind == resume_step)
2596 step = maybe_hw_step (thread);
20ba1ce6
PA
2597
2598 if (debug_threads)
2599 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2600 target_pid_to_str (ptid_of (thread)),
2601 paddress (lp->stop_pc),
2602 step);
2603
2604 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2605 }
2606}
2607
fa96cb38
PA
2608/* Wait for an event from child(ren) WAIT_PTID, and return any that
2609 match FILTER_PTID (leaving others pending). The PTIDs can be:
2610 minus_one_ptid, to specify any child; a pid PTID, specifying all
2611 lwps of a thread group; or a PTID representing a single lwp. Store
2612 the stop status through the status pointer WSTAT. OPTIONS is
2613 passed to the waitpid call. Return 0 if no event was found and
2614 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2615 was found. Return the PID of the stopped child otherwise. */
bd99dc85 2616
0d62e5e8 2617static int
fa96cb38
PA
2618linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2619 int *wstatp, int options)
0d62e5e8 2620{
d86d4aaf 2621 struct thread_info *event_thread;
d50171e4 2622 struct lwp_info *event_child, *requested_child;
fa96cb38 2623 sigset_t block_mask, prev_mask;
d50171e4 2624
fa96cb38 2625 retry:
d86d4aaf
DE
2626 /* N.B. event_thread points to the thread_info struct that contains
2627 event_child. Keep them in sync. */
2628 event_thread = NULL;
d50171e4
PA
2629 event_child = NULL;
2630 requested_child = NULL;
0d62e5e8 2631
95954743 2632 /* Check for a lwp with a pending status. */
bd99dc85 2633
d7e15655 2634 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2635 {
83e1b6c1
SM
2636 event_thread = find_thread_in_random ([&] (thread_info *thread)
2637 {
2638 return status_pending_p_callback (thread, filter_ptid);
2639 });
2640
d86d4aaf
DE
2641 if (event_thread != NULL)
2642 event_child = get_thread_lwp (event_thread);
2643 if (debug_threads && event_thread)
2644 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2645 }
d7e15655 2646 else if (filter_ptid != null_ptid)
0d62e5e8 2647 {
fa96cb38 2648 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2649
bde24c0a 2650 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2651 && requested_child->status_pending_p
229d26fc
SM
2652 && (requested_child->collecting_fast_tracepoint
2653 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2654 {
2655 enqueue_one_deferred_signal (requested_child,
2656 &requested_child->status_pending);
2657 requested_child->status_pending_p = 0;
2658 requested_child->status_pending = 0;
2659 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2660 }
2661
2662 if (requested_child->suspended
2663 && requested_child->status_pending_p)
38e08fca
GB
2664 {
2665 internal_error (__FILE__, __LINE__,
2666 "requesting an event out of a"
2667 " suspended child?");
2668 }
fa593d66 2669
d50171e4 2670 if (requested_child->status_pending_p)
d86d4aaf
DE
2671 {
2672 event_child = requested_child;
2673 event_thread = get_lwp_thread (event_child);
2674 }
0d62e5e8 2675 }
611cb4a5 2676
0d62e5e8
DJ
2677 if (event_child != NULL)
2678 {
bd99dc85 2679 if (debug_threads)
87ce2a04 2680 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2681 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2682 *wstatp = event_child->status_pending;
bd99dc85
PA
2683 event_child->status_pending_p = 0;
2684 event_child->status_pending = 0;
0bfdf32f 2685 current_thread = event_thread;
d86d4aaf 2686 return lwpid_of (event_thread);
0d62e5e8
DJ
2687 }
2688
fa96cb38
PA
2689 /* But if we don't find a pending event, we'll have to wait.
2690
2691 We only enter this loop if no process has a pending wait status.
2692 Thus any action taken in response to a wait status inside this
2693 loop is responding as soon as we detect the status, not after any
2694 pending events. */
d8301ad1 2695
fa96cb38
PA
2696 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2697 all signals while here. */
2698 sigfillset (&block_mask);
21987b9c 2699 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2700
582511be
PA
2701 /* Always pull all events out of the kernel. We'll randomly select
2702 an event LWP out of all that have events, to prevent
2703 starvation. */
fa96cb38 2704 while (event_child == NULL)
0d62e5e8 2705 {
fa96cb38 2706 pid_t ret = 0;
0d62e5e8 2707
fa96cb38
PA
2708 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2709 quirks:
0d62e5e8 2710
fa96cb38
PA
2711 - If the thread group leader exits while other threads in the
2712 thread group still exist, waitpid(TGID, ...) hangs. That
2713 waitpid won't return an exit status until the other threads
2714 in the group are reaped.
611cb4a5 2715
fa96cb38
PA
2716 - When a non-leader thread execs, that thread just vanishes
2717 without reporting an exit (so we'd hang if we waited for it
2718 explicitly in that case). The exec event is reported to
94585166 2719 the TGID pid. */
fa96cb38
PA
2720 errno = 0;
2721 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2722
fa96cb38
PA
2723 if (debug_threads)
2724 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
6d91ce9a 2725 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2726
fa96cb38 2727 if (ret > 0)
0d62e5e8 2728 {
89be2091 2729 if (debug_threads)
bd99dc85 2730 {
fa96cb38
PA
2731 debug_printf ("LLW: waitpid %ld received %s\n",
2732 (long) ret, status_to_str (*wstatp));
bd99dc85 2733 }
89be2091 2734
582511be
PA
2735 /* Filter all events. IOW, leave all events pending. We'll
2736 randomly select an event LWP out of all that have events
2737 below. */
2738 linux_low_filter_event (ret, *wstatp);
fa96cb38
PA
2739 /* Retry until nothing comes out of waitpid. A single
2740 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2741 continue;
2742 }
2743
20ba1ce6
PA
2744 /* Now that we've pulled all events out of the kernel, resume
2745 LWPs that don't have an interesting event to report. */
2746 if (stopping_threads == NOT_STOPPING_THREADS)
f0045347 2747 for_each_thread (resume_stopped_resumed_lwps);
20ba1ce6
PA
2748
2749 /* ... and find an LWP with a status to report to the core, if
2750 any. */
83e1b6c1
SM
2751 event_thread = find_thread_in_random ([&] (thread_info *thread)
2752 {
2753 return status_pending_p_callback (thread, filter_ptid);
2754 });
2755
582511be
PA
2756 if (event_thread != NULL)
2757 {
2758 event_child = get_thread_lwp (event_thread);
2759 *wstatp = event_child->status_pending;
2760 event_child->status_pending_p = 0;
2761 event_child->status_pending = 0;
2762 break;
2763 }
2764
fa96cb38
PA
2765 /* Check for zombie thread group leaders. Those can't be reaped
2766 until all other threads in the thread group are. */
2767 check_zombie_leaders ();
2768
a1385b7b
SM
2769 auto not_stopped = [&] (thread_info *thread)
2770 {
2771 return not_stopped_callback (thread, wait_ptid);
2772 };
2773
fa96cb38
PA
2774 /* If there are no resumed children left in the set of LWPs we
2775 want to wait for, bail. We can't just block in
2776 waitpid/sigsuspend, because lwps might have been left stopped
2777 in trace-stop state, and we'd be stuck forever waiting for
2778 their status to change (which would only happen if we resumed
2779 them). Even if WNOHANG is set, this return code is preferred
2780 over 0 (below), as it is more detailed. */
a1385b7b 2781 if (find_thread (not_stopped) == NULL)
a6dbe5df 2782 {
fa96cb38
PA
2783 if (debug_threads)
2784 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
21987b9c 2785 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2786 return -1;
a6dbe5df
PA
2787 }
2788
fa96cb38
PA
2789 /* No interesting event to report to the caller. */
2790 if ((options & WNOHANG))
24a09b5f 2791 {
fa96cb38
PA
2792 if (debug_threads)
2793 debug_printf ("WNOHANG set, no event found\n");
2794
21987b9c 2795 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2796 return 0;
24a09b5f
DJ
2797 }
2798
fa96cb38
PA
2799 /* Block until we get an event reported with SIGCHLD. */
2800 if (debug_threads)
2801 debug_printf ("sigsuspend'ing\n");
d50171e4 2802
fa96cb38 2803 sigsuspend (&prev_mask);
21987b9c 2804 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2805 goto retry;
2806 }
d50171e4 2807
21987b9c 2808 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2809
0bfdf32f 2810 current_thread = event_thread;
d50171e4 2811
fa96cb38
PA
2812 return lwpid_of (event_thread);
2813}
2814
2815/* Wait for an event from child(ren) PTID. PTIDs can be:
2816 minus_one_ptid, to specify any child; a pid PTID, specifying all
2817 lwps of a thread group; or a PTID representing a single lwp. Store
2818 the stop status through the status pointer WSTAT. OPTIONS is
2819 passed to the waitpid call. Return 0 if no event was found and
2820 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2821 was found. Return the PID of the stopped child otherwise. */
2822
2823static int
2824linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2825{
2826 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2827}
2828
6bf5e0ba
PA
2829/* Select one LWP out of those that have events pending. */
2830
2831static void
2832select_event_lwp (struct lwp_info **orig_lp)
2833{
582511be
PA
2834 struct thread_info *event_thread = NULL;
2835
2836 /* In all-stop, give preference to the LWP that is being
2837 single-stepped. There will be at most one, and it's the LWP that
2838 the core is most interested in. If we didn't do this, then we'd
2839 have to handle pending step SIGTRAPs somehow in case the core
2840 later continues the previously-stepped thread, otherwise we'd
2841 report the pending SIGTRAP, and the core, not having stepped the
2842 thread, wouldn't understand what the trap was for, and therefore
2843 would report it to the user as a random signal. */
2844 if (!non_stop)
6bf5e0ba 2845 {
39a64da5
SM
2846 event_thread = find_thread ([] (thread_info *thread)
2847 {
2848 lwp_info *lp = get_thread_lwp (thread);
2849
2850 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2851 && thread->last_resume_kind == resume_step
2852 && lp->status_pending_p);
2853 });
2854
582511be
PA
2855 if (event_thread != NULL)
2856 {
2857 if (debug_threads)
2858 debug_printf ("SEL: Select single-step %s\n",
2859 target_pid_to_str (ptid_of (event_thread)));
2860 }
6bf5e0ba 2861 }
582511be 2862 if (event_thread == NULL)
6bf5e0ba
PA
2863 {
2864 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2865 which have had events. */
6bf5e0ba 2866
b0319eaa 2867 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2868 {
2869 lwp_info *lp = get_thread_lwp (thread);
2870
b0319eaa
TT
2871 /* Only resumed LWPs that have an event pending. */
2872 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2873 && lp->status_pending_p);
39a64da5 2874 });
6bf5e0ba
PA
2875 }
2876
d86d4aaf 2877 if (event_thread != NULL)
6bf5e0ba 2878 {
d86d4aaf
DE
2879 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2880
6bf5e0ba
PA
2881 /* Switch the event LWP. */
2882 *orig_lp = event_lp;
2883 }
2884}
2885
7984d532
PA
2886/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2887 NULL. */
2888
2889static void
2890unsuspend_all_lwps (struct lwp_info *except)
2891{
139720c5
SM
2892 for_each_thread ([&] (thread_info *thread)
2893 {
2894 lwp_info *lwp = get_thread_lwp (thread);
2895
2896 if (lwp != except)
2897 lwp_suspended_decr (lwp);
2898 });
7984d532
PA
2899}
2900
9c80ecd6 2901static void move_out_of_jump_pad_callback (thread_info *thread);
fcb056a5 2902static bool stuck_in_jump_pad_callback (thread_info *thread);
5a6b0a41 2903static bool lwp_running (thread_info *thread);
fa593d66
PA
2904static ptid_t linux_wait_1 (ptid_t ptid,
2905 struct target_waitstatus *ourstatus,
2906 int target_options);
2907
2908/* Stabilize threads (move out of jump pads).
2909
2910 If a thread is midway collecting a fast tracepoint, we need to
2911 finish the collection and move it out of the jump pad before
2912 reporting the signal.
2913
2914 This avoids recursion while collecting (when a signal arrives
2915 midway, and the signal handler itself collects), which would trash
2916 the trace buffer. In case the user set a breakpoint in a signal
2917 handler, this avoids the backtrace showing the jump pad, etc..
2918 Most importantly, there are certain things we can't do safely if
2919 threads are stopped in a jump pad (or in its callee's). For
2920 example:
2921
2922 - starting a new trace run. A thread still collecting the
2923 previous run, could trash the trace buffer when resumed. The trace
2924 buffer control structures would have been reset but the thread had
2925 no way to tell. The thread could even midway memcpy'ing to the
2926 buffer, which would mean that when resumed, it would clobber the
2927 trace buffer that had been set for a new run.
2928
2929 - we can't rewrite/reuse the jump pads for new tracepoints
2930 safely. Say you do tstart while a thread is stopped midway while
2931 collecting. When the thread is later resumed, it finishes the
2932 collection, and returns to the jump pad, to execute the original
2933 instruction that was under the tracepoint jump at the time the
2934 older run had been started. If the jump pad had been rewritten
2935 since for something else in the new run, the thread would now
2936 execute the wrong / random instructions. */
2937
2938static void
2939linux_stabilize_threads (void)
2940{
fcb056a5 2941 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
fa593d66 2942
d86d4aaf 2943 if (thread_stuck != NULL)
fa593d66 2944 {
b4d51a55 2945 if (debug_threads)
87ce2a04 2946 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
d86d4aaf 2947 lwpid_of (thread_stuck));
fa593d66
PA
2948 return;
2949 }
2950
fcb056a5 2951 thread_info *saved_thread = current_thread;
fa593d66
PA
2952
2953 stabilizing_threads = 1;
2954
2955 /* Kick 'em all. */
f0045347 2956 for_each_thread (move_out_of_jump_pad_callback);
fa593d66
PA
2957
2958 /* Loop until all are stopped out of the jump pads. */
5a6b0a41 2959 while (find_thread (lwp_running) != NULL)
fa593d66
PA
2960 {
2961 struct target_waitstatus ourstatus;
2962 struct lwp_info *lwp;
fa593d66
PA
2963 int wstat;
2964
2965 /* Note that we go through the full wait even loop. While
2966 moving threads out of jump pad, we need to be able to step
2967 over internal breakpoints and such. */
32fcada3 2968 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2969
2970 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2971 {
0bfdf32f 2972 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2973
2974 /* Lock it. */
863d01bd 2975 lwp_suspended_inc (lwp);
fa593d66 2976
a493e3e2 2977 if (ourstatus.value.sig != GDB_SIGNAL_0
0bfdf32f 2978 || current_thread->last_resume_kind == resume_stop)
fa593d66 2979 {
2ea28649 2980 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
fa593d66
PA
2981 enqueue_one_deferred_signal (lwp, &wstat);
2982 }
2983 }
2984 }
2985
fcdad592 2986 unsuspend_all_lwps (NULL);
fa593d66
PA
2987
2988 stabilizing_threads = 0;
2989
0bfdf32f 2990 current_thread = saved_thread;
fa593d66 2991
b4d51a55 2992 if (debug_threads)
fa593d66 2993 {
fcb056a5
SM
2994 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2995
d86d4aaf 2996 if (thread_stuck != NULL)
87ce2a04 2997 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
d86d4aaf 2998 lwpid_of (thread_stuck));
fa593d66
PA
2999 }
3000}
3001
582511be
PA
3002/* Convenience function that is called when the kernel reports an
3003 event that is not passed out to GDB. */
3004
3005static ptid_t
3006ignore_event (struct target_waitstatus *ourstatus)
3007{
3008 /* If we got an event, there may still be others, as a single
3009 SIGCHLD can indicate more than one child stopped. This forces
3010 another target_wait call. */
3011 async_file_mark ();
3012
3013 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3014 return null_ptid;
3015}
3016
65706a29
PA
3017/* Convenience function that is called when the kernel reports an exit
3018 event. This decides whether to report the event to GDB as a
3019 process exit event, a thread exit event, or to suppress the
3020 event. */
3021
3022static ptid_t
3023filter_exit_event (struct lwp_info *event_child,
3024 struct target_waitstatus *ourstatus)
3025{
c12a5089 3026 client_state &cs = get_client_state ();
65706a29
PA
3027 struct thread_info *thread = get_lwp_thread (event_child);
3028 ptid_t ptid = ptid_of (thread);
3029
3030 if (!last_thread_of_process_p (pid_of (thread)))
3031 {
c12a5089 3032 if (cs.report_thread_events)
65706a29
PA
3033 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3034 else
3035 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3036
3037 delete_lwp (event_child);
3038 }
3039 return ptid;
3040}
3041
82075af2
JS
3042/* Returns 1 if GDB is interested in any event_child syscalls. */
3043
3044static int
3045gdb_catching_syscalls_p (struct lwp_info *event_child)
3046{
3047 struct thread_info *thread = get_lwp_thread (event_child);
3048 struct process_info *proc = get_thread_process (thread);
3049
f27866ba 3050 return !proc->syscalls_to_catch.empty ();
82075af2
JS
3051}
3052
3053/* Returns 1 if GDB is interested in the event_child syscall.
3054 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3055
3056static int
3057gdb_catch_this_syscall_p (struct lwp_info *event_child)
3058{
4cc32bec 3059 int sysno;
82075af2
JS
3060 struct thread_info *thread = get_lwp_thread (event_child);
3061 struct process_info *proc = get_thread_process (thread);
3062
f27866ba 3063 if (proc->syscalls_to_catch.empty ())
82075af2
JS
3064 return 0;
3065
f27866ba 3066 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
82075af2
JS
3067 return 1;
3068
4cc32bec 3069 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
3070
3071 for (int iter : proc->syscalls_to_catch)
82075af2
JS
3072 if (iter == sysno)
3073 return 1;
3074
3075 return 0;
3076}
3077
0d62e5e8 3078/* Wait for process, returns status. */
da6d8c04 3079
95954743
PA
3080static ptid_t
3081linux_wait_1 (ptid_t ptid,
3082 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 3083{
c12a5089 3084 client_state &cs = get_client_state ();
e5f1222d 3085 int w;
fc7238bb 3086 struct lwp_info *event_child;
bd99dc85 3087 int options;
bd99dc85 3088 int pid;
6bf5e0ba
PA
3089 int step_over_finished;
3090 int bp_explains_trap;
3091 int maybe_internal_trap;
3092 int report_to_gdb;
219f2f23 3093 int trace_event;
c2d6af84 3094 int in_step_range;
f2faf941 3095 int any_resumed;
bd99dc85 3096
87ce2a04
DE
3097 if (debug_threads)
3098 {
3099 debug_enter ();
3100 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3101 }
3102
bd99dc85
PA
3103 /* Translate generic target options into linux options. */
3104 options = __WALL;
3105 if (target_options & TARGET_WNOHANG)
3106 options |= WNOHANG;
0d62e5e8 3107
fa593d66
PA
3108 bp_explains_trap = 0;
3109 trace_event = 0;
c2d6af84 3110 in_step_range = 0;
bd99dc85
PA
3111 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3112
83e1b6c1
SM
3113 auto status_pending_p_any = [&] (thread_info *thread)
3114 {
3115 return status_pending_p_callback (thread, minus_one_ptid);
3116 };
3117
a1385b7b
SM
3118 auto not_stopped = [&] (thread_info *thread)
3119 {
3120 return not_stopped_callback (thread, minus_one_ptid);
3121 };
3122
f2faf941 3123 /* Find a resumed LWP, if any. */
83e1b6c1 3124 if (find_thread (status_pending_p_any) != NULL)
f2faf941 3125 any_resumed = 1;
a1385b7b 3126 else if (find_thread (not_stopped) != NULL)
f2faf941
PA
3127 any_resumed = 1;
3128 else
3129 any_resumed = 0;
3130
d7e15655 3131 if (step_over_bkpt == null_ptid)
6bf5e0ba
PA
3132 pid = linux_wait_for_event (ptid, &w, options);
3133 else
3134 {
3135 if (debug_threads)
87ce2a04
DE
3136 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3137 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
3138 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3139 }
3140
f2faf941 3141 if (pid == 0 || (pid == -1 && !any_resumed))
87ce2a04 3142 {
fa96cb38
PA
3143 gdb_assert (target_options & TARGET_WNOHANG);
3144
87ce2a04
DE
3145 if (debug_threads)
3146 {
fa96cb38
PA
3147 debug_printf ("linux_wait_1 ret = null_ptid, "
3148 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
3149 debug_exit ();
3150 }
fa96cb38
PA
3151
3152 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
3153 return null_ptid;
3154 }
fa96cb38
PA
3155 else if (pid == -1)
3156 {
3157 if (debug_threads)
3158 {
3159 debug_printf ("linux_wait_1 ret = null_ptid, "
3160 "TARGET_WAITKIND_NO_RESUMED\n");
3161 debug_exit ();
3162 }
bd99dc85 3163
fa96cb38
PA
3164 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3165 return null_ptid;
3166 }
0d62e5e8 3167
0bfdf32f 3168 event_child = get_thread_lwp (current_thread);
0d62e5e8 3169
fa96cb38
PA
3170 /* linux_wait_for_event only returns an exit status for the last
3171 child of a process. Report it. */
3172 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3173 {
fa96cb38 3174 if (WIFEXITED (w))
0d62e5e8 3175 {
fa96cb38
PA
3176 ourstatus->kind = TARGET_WAITKIND_EXITED;
3177 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 3178
fa96cb38 3179 if (debug_threads)
bd99dc85 3180 {
fa96cb38
PA
3181 debug_printf ("linux_wait_1 ret = %s, exited with "
3182 "retcode %d\n",
0bfdf32f 3183 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3184 WEXITSTATUS (w));
3185 debug_exit ();
bd99dc85 3186 }
fa96cb38
PA
3187 }
3188 else
3189 {
3190 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3191 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 3192
fa96cb38
PA
3193 if (debug_threads)
3194 {
3195 debug_printf ("linux_wait_1 ret = %s, terminated with "
3196 "signal %d\n",
0bfdf32f 3197 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3198 WTERMSIG (w));
3199 debug_exit ();
3200 }
0d62e5e8 3201 }
fa96cb38 3202
65706a29
PA
3203 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3204 return filter_exit_event (event_child, ourstatus);
3205
0bfdf32f 3206 return ptid_of (current_thread);
da6d8c04
DJ
3207 }
3208
2d97cd35
AT
3209 /* If step-over executes a breakpoint instruction, in the case of a
3210 hardware single step it means a gdb/gdbserver breakpoint had been
3211 planted on top of a permanent breakpoint, in the case of a software
3212 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3213 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3214 the breakpoint address.
3215 So in the case of the hardware single step advance the PC manually
3216 past the breakpoint and in the case of software single step advance only
3b9a79ef 3217 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3218 This avoids that a program would keep trapping a permanent breakpoint
3219 forever. */
d7e15655 3220 if (step_over_bkpt != null_ptid
2d97cd35
AT
3221 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3222 && (event_child->stepping
3b9a79ef 3223 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3224 {
dd373349
AT
3225 int increment_pc = 0;
3226 int breakpoint_kind = 0;
3227 CORE_ADDR stop_pc = event_child->stop_pc;
3228
769ef81f
AT
3229 breakpoint_kind =
3230 the_target->breakpoint_kind_from_current_state (&stop_pc);
dd373349 3231 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2
PA
3232
3233 if (debug_threads)
3234 {
3235 debug_printf ("step-over for %s executed software breakpoint\n",
3236 target_pid_to_str (ptid_of (current_thread)));
3237 }
3238
3239 if (increment_pc != 0)
3240 {
3241 struct regcache *regcache
3242 = get_thread_regcache (current_thread, 1);
3243
3244 event_child->stop_pc += increment_pc;
3245 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3246
3247 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 3248 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3249 }
3250 }
3251
6bf5e0ba
PA
3252 /* If this event was not handled before, and is not a SIGTRAP, we
3253 report it. SIGILL and SIGSEGV are also treated as traps in case
3254 a breakpoint is inserted at the current PC. If this target does
3255 not support internal breakpoints at all, we also report the
3256 SIGTRAP without further processing; it's of no concern to us. */
3257 maybe_internal_trap
3258 = (supports_breakpoints ()
3259 && (WSTOPSIG (w) == SIGTRAP
3260 || ((WSTOPSIG (w) == SIGILL
3261 || WSTOPSIG (w) == SIGSEGV)
3262 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3263
3264 if (maybe_internal_trap)
3265 {
3266 /* Handle anything that requires bookkeeping before deciding to
3267 report the event or continue waiting. */
3268
3269 /* First check if we can explain the SIGTRAP with an internal
3270 breakpoint, or if we should possibly report the event to GDB.
3271 Do this before anything that may remove or insert a
3272 breakpoint. */
3273 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3274
3275 /* We have a SIGTRAP, possibly a step-over dance has just
3276 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3277 reinsert breakpoints and delete any single-step
3278 breakpoints. */
6bf5e0ba
PA
3279 step_over_finished = finish_step_over (event_child);
3280
3281 /* Now invoke the callbacks of any internal breakpoints there. */
3282 check_breakpoints (event_child->stop_pc);
3283
219f2f23
PA
3284 /* Handle tracepoint data collecting. This may overflow the
3285 trace buffer, and cause a tracing stop, removing
3286 breakpoints. */
3287 trace_event = handle_tracepoints (event_child);
3288
6bf5e0ba
PA
3289 if (bp_explains_trap)
3290 {
6bf5e0ba 3291 if (debug_threads)
87ce2a04 3292 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba
PA
3293 }
3294 }
3295 else
3296 {
3297 /* We have some other signal, possibly a step-over dance was in
3298 progress, and it should be cancelled too. */
3299 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3300 }
3301
3302 /* We have all the data we need. Either report the event to GDB, or
3303 resume threads and keep waiting for more. */
3304
3305 /* If we're collecting a fast tracepoint, finish the collection and
3306 move out of the jump pad before delivering a signal. See
3307 linux_stabilize_threads. */
3308
3309 if (WIFSTOPPED (w)
3310 && WSTOPSIG (w) != SIGTRAP
3311 && supports_fast_tracepoints ()
58b4daa5 3312 && agent_loaded_p ())
fa593d66
PA
3313 {
3314 if (debug_threads)
87ce2a04
DE
3315 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3316 "to defer or adjust it.\n",
0bfdf32f 3317 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3318
3319 /* Allow debugging the jump pad itself. */
0bfdf32f 3320 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3321 && maybe_move_out_of_jump_pad (event_child, &w))
3322 {
3323 enqueue_one_deferred_signal (event_child, &w);
3324
3325 if (debug_threads)
87ce2a04 3326 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 3327 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3328
3329 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be 3330
edeeb602
YQ
3331 if (debug_threads)
3332 debug_exit ();
582511be 3333 return ignore_event (ourstatus);
fa593d66
PA
3334 }
3335 }
219f2f23 3336
229d26fc
SM
3337 if (event_child->collecting_fast_tracepoint
3338 != fast_tpoint_collect_result::not_collecting)
fa593d66
PA
3339 {
3340 if (debug_threads)
87ce2a04
DE
3341 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3342 "Check if we're already there.\n",
0bfdf32f 3343 lwpid_of (current_thread),
229d26fc 3344 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3345
3346 trace_event = 1;
3347
3348 event_child->collecting_fast_tracepoint
3349 = linux_fast_tracepoint_collecting (event_child, NULL);
3350
229d26fc
SM
3351 if (event_child->collecting_fast_tracepoint
3352 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3353 {
3354 /* No longer need this breakpoint. */
3355 if (event_child->exit_jump_pad_bkpt != NULL)
3356 {
3357 if (debug_threads)
87ce2a04
DE
3358 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3359 "stopping all threads momentarily.\n");
fa593d66
PA
3360
3361 /* Other running threads could hit this breakpoint.
3362 We don't handle moribund locations like GDB does,
3363 instead we always pause all threads when removing
3364 breakpoints, so that any step-over or
3365 decr_pc_after_break adjustment is always taken
3366 care of while the breakpoint is still
3367 inserted. */
3368 stop_all_lwps (1, event_child);
fa593d66
PA
3369
3370 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3371 event_child->exit_jump_pad_bkpt = NULL;
3372
3373 unstop_all_lwps (1, event_child);
3374
3375 gdb_assert (event_child->suspended >= 0);
3376 }
3377 }
3378
229d26fc
SM
3379 if (event_child->collecting_fast_tracepoint
3380 == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
3381 {
3382 if (debug_threads)
87ce2a04
DE
3383 debug_printf ("fast tracepoint finished "
3384 "collecting successfully.\n");
fa593d66
PA
3385
3386 /* We may have a deferred signal to report. */
3387 if (dequeue_one_deferred_signal (event_child, &w))
3388 {
3389 if (debug_threads)
87ce2a04 3390 debug_printf ("dequeued one signal.\n");
fa593d66 3391 }
3c11dd79 3392 else
fa593d66 3393 {
3c11dd79 3394 if (debug_threads)
87ce2a04 3395 debug_printf ("no deferred signals.\n");
fa593d66
PA
3396
3397 if (stabilizing_threads)
3398 {
3399 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3400 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3401
3402 if (debug_threads)
3403 {
3404 debug_printf ("linux_wait_1 ret = %s, stopped "
3405 "while stabilizing threads\n",
0bfdf32f 3406 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3407 debug_exit ();
3408 }
3409
0bfdf32f 3410 return ptid_of (current_thread);
fa593d66
PA
3411 }
3412 }
3413 }
6bf5e0ba
PA
3414 }
3415
e471f25b
PA
3416 /* Check whether GDB would be interested in this event. */
3417
82075af2
JS
3418 /* Check if GDB is interested in this syscall. */
3419 if (WIFSTOPPED (w)
3420 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3421 && !gdb_catch_this_syscall_p (event_child))
3422 {
3423 if (debug_threads)
3424 {
3425 debug_printf ("Ignored syscall for LWP %ld.\n",
3426 lwpid_of (current_thread));
3427 }
3428
3429 linux_resume_one_lwp (event_child, event_child->stepping,
3430 0, NULL);
edeeb602
YQ
3431
3432 if (debug_threads)
3433 debug_exit ();
82075af2
JS
3434 return ignore_event (ourstatus);
3435 }
3436
e471f25b
PA
3437 /* If GDB is not interested in this signal, don't stop other
3438 threads, and don't report it to GDB. Just resume the inferior
3439 right away. We do this for threading-related signals as well as
3440 any that GDB specifically requested we ignore. But never ignore
3441 SIGSTOP if we sent it ourselves, and do not ignore signals when
3442 stepping - they may require special handling to skip the signal
c9587f88
AT
3443 handler. Also never ignore signals that could be caused by a
3444 breakpoint. */
e471f25b 3445 if (WIFSTOPPED (w)
0bfdf32f 3446 && current_thread->last_resume_kind != resume_step
e471f25b 3447 && (
1a981360 3448#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3449 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3450 && (WSTOPSIG (w) == __SIGRTMIN
3451 || WSTOPSIG (w) == __SIGRTMIN + 1))
3452 ||
3453#endif
c12a5089 3454 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3455 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3456 && current_thread->last_resume_kind == resume_stop)
3457 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3458 {
3459 siginfo_t info, *info_p;
3460
3461 if (debug_threads)
87ce2a04 3462 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3463 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3464
0bfdf32f 3465 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3466 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3467 info_p = &info;
3468 else
3469 info_p = NULL;
863d01bd
PA
3470
3471 if (step_over_finished)
3472 {
3473 /* We cancelled this thread's step-over above. We still
3474 need to unsuspend all other LWPs, and set them back
3475 running again while the signal handler runs. */
3476 unsuspend_all_lwps (event_child);
3477
3478 /* Enqueue the pending signal info so that proceed_all_lwps
3479 doesn't lose it. */
3480 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3481
3482 proceed_all_lwps ();
3483 }
3484 else
3485 {
3486 linux_resume_one_lwp (event_child, event_child->stepping,
3487 WSTOPSIG (w), info_p);
3488 }
edeeb602
YQ
3489
3490 if (debug_threads)
3491 debug_exit ();
3492
582511be 3493 return ignore_event (ourstatus);
e471f25b
PA
3494 }
3495
c2d6af84
PA
3496 /* Note that all addresses are always "out of the step range" when
3497 there's no range to begin with. */
3498 in_step_range = lwp_in_step_range (event_child);
3499
3500 /* If GDB wanted this thread to single step, and the thread is out
3501 of the step range, we always want to report the SIGTRAP, and let
3502 GDB handle it. Watchpoints should always be reported. So should
3503 signals we can't explain. A SIGTRAP we can't explain could be a
3504 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3505 do, we're be able to handle GDB breakpoints on top of internal
3506 breakpoints, by handling the internal breakpoint and still
3507 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3508 won't see the breakpoint hit. If we see a single-step event but
3509 the thread should be continuing, don't pass the trap to gdb.
3510 That indicates that we had previously finished a single-step but
3511 left the single-step pending -- see
3512 complete_ongoing_step_over. */
6bf5e0ba 3513 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3514 || (current_thread->last_resume_kind == resume_step
c2d6af84 3515 && !in_step_range)
15c66dd6 3516 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3517 || (!in_step_range
3518 && !bp_explains_trap
3519 && !trace_event
3520 && !step_over_finished
3521 && !(current_thread->last_resume_kind == resume_continue
3522 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3523 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3524 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3525 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3526 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3527
3528 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3529
3530 /* We found no reason GDB would want us to stop. We either hit one
3531 of our own breakpoints, or finished an internal step GDB
3532 shouldn't know about. */
3533 if (!report_to_gdb)
3534 {
3535 if (debug_threads)
3536 {
3537 if (bp_explains_trap)
87ce2a04 3538 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3539 if (step_over_finished)
87ce2a04 3540 debug_printf ("Step-over finished.\n");
219f2f23 3541 if (trace_event)
87ce2a04 3542 debug_printf ("Tracepoint event.\n");
c2d6af84 3543 if (lwp_in_step_range (event_child))
87ce2a04
DE
3544 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3545 paddress (event_child->stop_pc),
3546 paddress (event_child->step_range_start),
3547 paddress (event_child->step_range_end));
6bf5e0ba
PA
3548 }
3549
3550 /* We're not reporting this breakpoint to GDB, so apply the
3551 decr_pc_after_break adjustment to the inferior's regcache
3552 ourselves. */
3553
3554 if (the_low_target.set_pc != NULL)
3555 {
3556 struct regcache *regcache
0bfdf32f 3557 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3558 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3559 }
3560
7984d532 3561 if (step_over_finished)
e3652c84
YQ
3562 {
3563 /* If we have finished stepping over a breakpoint, we've
3564 stopped and suspended all LWPs momentarily except the
3565 stepping one. This is where we resume them all again.
3566 We're going to keep waiting, so use proceed, which
3567 handles stepping over the next breakpoint. */
3568 unsuspend_all_lwps (event_child);
3569 }
3570 else
3571 {
3572 /* Remove the single-step breakpoints if any. Note that
3573 there isn't single-step breakpoint if we finished stepping
3574 over. */
3575 if (can_software_single_step ()
3576 && has_single_step_breakpoints (current_thread))
3577 {
3578 stop_all_lwps (0, event_child);
3579 delete_single_step_breakpoints (current_thread);
3580 unstop_all_lwps (0, event_child);
3581 }
3582 }
7984d532 3583
e3652c84
YQ
3584 if (debug_threads)
3585 debug_printf ("proceeding all threads.\n");
6bf5e0ba 3586 proceed_all_lwps ();
edeeb602
YQ
3587
3588 if (debug_threads)
3589 debug_exit ();
3590
582511be 3591 return ignore_event (ourstatus);
6bf5e0ba
PA
3592 }
3593
3594 if (debug_threads)
3595 {
00db26fa 3596 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30 3597 {
23fdd69e
SM
3598 std::string str
3599 = target_waitstatus_to_string (&event_child->waitstatus);
ad071a30 3600
ad071a30 3601 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
23fdd69e 3602 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
ad071a30 3603 }
0bfdf32f 3604 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3605 {
3606 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3607 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3608 else if (!lwp_in_step_range (event_child))
87ce2a04 3609 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3610 }
15c66dd6 3611 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3612 debug_printf ("Stopped by watchpoint.\n");
582511be 3613 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3614 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3615 if (debug_threads)
87ce2a04 3616 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3617 }
3618
3619 /* Alright, we're going to report a stop. */
3620
3b9a79ef 3621 /* Remove single-step breakpoints. */
8901d193
YQ
3622 if (can_software_single_step ())
3623 {
3b9a79ef 3624 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3625 lwps, so that other threads won't hit the breakpoint in the
3626 staled memory. */
3b9a79ef 3627 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3628
3629 if (non_stop)
3630 {
3b9a79ef
YQ
3631 remove_single_step_breakpoints_p
3632 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3633 }
3634 else
3635 {
3636 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3637 requests. Delete all single-step breakpoints. */
8901d193 3638
9c80ecd6
SM
3639 find_thread ([&] (thread_info *thread) {
3640 if (has_single_step_breakpoints (thread))
3641 {
3642 remove_single_step_breakpoints_p = 1;
3643 return true;
3644 }
8901d193 3645
9c80ecd6
SM
3646 return false;
3647 });
8901d193
YQ
3648 }
3649
3b9a79ef 3650 if (remove_single_step_breakpoints_p)
8901d193 3651 {
3b9a79ef 3652 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3653 so that other threads won't hit the breakpoint in the staled
3654 memory. */
3655 stop_all_lwps (0, event_child);
3656
3657 if (non_stop)
3658 {
3b9a79ef
YQ
3659 gdb_assert (has_single_step_breakpoints (current_thread));
3660 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3661 }
3662 else
3663 {
9c80ecd6
SM
3664 for_each_thread ([] (thread_info *thread){
3665 if (has_single_step_breakpoints (thread))
3666 delete_single_step_breakpoints (thread);
3667 });
8901d193
YQ
3668 }
3669
3670 unstop_all_lwps (0, event_child);
3671 }
3672 }
3673
582511be 3674 if (!stabilizing_threads)
6bf5e0ba
PA
3675 {
3676 /* In all-stop, stop all threads. */
582511be
PA
3677 if (!non_stop)
3678 stop_all_lwps (0, NULL);
6bf5e0ba 3679
c03e6ccc 3680 if (step_over_finished)
582511be
PA
3681 {
3682 if (!non_stop)
3683 {
3684 /* If we were doing a step-over, all other threads but
3685 the stepping one had been paused in start_step_over,
3686 with their suspend counts incremented. We don't want
3687 to do a full unstop/unpause, because we're in
3688 all-stop mode (so we want threads stopped), but we
3689 still need to unsuspend the other threads, to
3690 decrement their `suspended' count back. */
3691 unsuspend_all_lwps (event_child);
3692 }
3693 else
3694 {
3695 /* If we just finished a step-over, then all threads had
3696 been momentarily paused. In all-stop, that's fine,
3697 we want threads stopped by now anyway. In non-stop,
3698 we need to re-resume threads that GDB wanted to be
3699 running. */
3700 unstop_all_lwps (1, event_child);
3701 }
3702 }
c03e6ccc 3703
3aa5cfa0
AT
3704 /* If we're not waiting for a specific LWP, choose an event LWP
3705 from among those that have had events. Giving equal priority
3706 to all LWPs that have had events helps prevent
3707 starvation. */
d7e15655 3708 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3709 {
3710 event_child->status_pending_p = 1;
3711 event_child->status_pending = w;
3712
3713 select_event_lwp (&event_child);
3714
3715 /* current_thread and event_child must stay in sync. */
3716 current_thread = get_lwp_thread (event_child);
3717
3718 event_child->status_pending_p = 0;
3719 w = event_child->status_pending;
3720 }
3721
3722
fa593d66 3723 /* Stabilize threads (move out of jump pads). */
582511be
PA
3724 if (!non_stop)
3725 stabilize_threads ();
6bf5e0ba
PA
3726 }
3727 else
3728 {
3729 /* If we just finished a step-over, then all threads had been
3730 momentarily paused. In all-stop, that's fine, we want
3731 threads stopped by now anyway. In non-stop, we need to
3732 re-resume threads that GDB wanted to be running. */
3733 if (step_over_finished)
7984d532 3734 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3735 }
3736
00db26fa 3737 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3738 {
00db26fa
PA
3739 /* If the reported event is an exit, fork, vfork or exec, let
3740 GDB know. */
5a04c4cf
PA
3741
3742 /* Break the unreported fork relationship chain. */
3743 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3744 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3745 {
3746 event_child->fork_relative->fork_relative = NULL;
3747 event_child->fork_relative = NULL;
3748 }
3749
00db26fa 3750 *ourstatus = event_child->waitstatus;
de0d863e
DB
3751 /* Clear the event lwp's waitstatus since we handled it already. */
3752 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3753 }
3754 else
3755 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3756
582511be 3757 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3758 it was a software breakpoint, and the client doesn't know we can
3759 adjust the breakpoint ourselves. */
3760 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3761 && !cs.swbreak_feature)
582511be
PA
3762 {
3763 int decr_pc = the_low_target.decr_pc_after_break;
3764
3765 if (decr_pc != 0)
3766 {
3767 struct regcache *regcache
3768 = get_thread_regcache (current_thread, 1);
3769 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3770 }
3771 }
3772
82075af2
JS
3773 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3774 {
82075af2 3775 get_syscall_trapinfo (event_child,
4cc32bec 3776 &ourstatus->value.syscall_number);
82075af2
JS
3777 ourstatus->kind = event_child->syscall_state;
3778 }
3779 else if (current_thread->last_resume_kind == resume_stop
3780 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3781 {
3782 /* A thread that has been requested to stop by GDB with vCont;t,
3783 and it stopped cleanly, so report as SIG0. The use of
3784 SIGSTOP is an implementation detail. */
a493e3e2 3785 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3786 }
0bfdf32f 3787 else if (current_thread->last_resume_kind == resume_stop
8336d594 3788 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3789 {
3790 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3791 but, it stopped for other reasons. */
2ea28649 3792 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3793 }
de0d863e 3794 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3795 {
2ea28649 3796 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3797 }
3798
d7e15655 3799 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3800
bd99dc85 3801 if (debug_threads)
87ce2a04
DE
3802 {
3803 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3804 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3805 ourstatus->kind, ourstatus->value.sig);
3806 debug_exit ();
3807 }
bd99dc85 3808
65706a29
PA
3809 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3810 return filter_exit_event (event_child, ourstatus);
3811
0bfdf32f 3812 return ptid_of (current_thread);
bd99dc85
PA
3813}
3814
3815/* Get rid of any pending event in the pipe. */
3816static void
3817async_file_flush (void)
3818{
3819 int ret;
3820 char buf;
3821
3822 do
3823 ret = read (linux_event_pipe[0], &buf, 1);
3824 while (ret >= 0 || (ret == -1 && errno == EINTR));
3825}
3826
3827/* Put something in the pipe, so the event loop wakes up. */
3828static void
3829async_file_mark (void)
3830{
3831 int ret;
3832
3833 async_file_flush ();
3834
3835 do
3836 ret = write (linux_event_pipe[1], "+", 1);
3837 while (ret == 0 || (ret == -1 && errno == EINTR));
3838
3839 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3840 be awakened anyway. */
3841}
3842
6532e7e3
TBA
3843ptid_t
3844linux_process_target::wait (ptid_t ptid,
3845 target_waitstatus *ourstatus,
3846 int target_options)
bd99dc85 3847{
95954743 3848 ptid_t event_ptid;
bd99dc85 3849
bd99dc85
PA
3850 /* Flush the async file first. */
3851 if (target_is_async_p ())
3852 async_file_flush ();
3853
582511be
PA
3854 do
3855 {
3856 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3857 }
3858 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3859 && event_ptid == null_ptid
582511be 3860 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3861
3862 /* If at least one stop was reported, there may be more. A single
3863 SIGCHLD can signal more than one child stop. */
3864 if (target_is_async_p ()
3865 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3866 && event_ptid != null_ptid)
bd99dc85
PA
3867 async_file_mark ();
3868
3869 return event_ptid;
da6d8c04
DJ
3870}
3871
c5f62d5f 3872/* Send a signal to an LWP. */
fd500816
DJ
3873
3874static int
a1928bad 3875kill_lwp (unsigned long lwpid, int signo)
fd500816 3876{
4a6ed09b 3877 int ret;
fd500816 3878
4a6ed09b
PA
3879 errno = 0;
3880 ret = syscall (__NR_tkill, lwpid, signo);
3881 if (errno == ENOSYS)
3882 {
3883 /* If tkill fails, then we are not using nptl threads, a
3884 configuration we no longer support. */
3885 perror_with_name (("tkill"));
3886 }
3887 return ret;
fd500816
DJ
3888}
3889
964e4306
PA
3890void
3891linux_stop_lwp (struct lwp_info *lwp)
3892{
3893 send_sigstop (lwp);
3894}
3895
0d62e5e8 3896static void
02fc4de7 3897send_sigstop (struct lwp_info *lwp)
0d62e5e8 3898{
bd99dc85 3899 int pid;
0d62e5e8 3900
d86d4aaf 3901 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3902
0d62e5e8
DJ
3903 /* If we already have a pending stop signal for this process, don't
3904 send another. */
54a0b537 3905 if (lwp->stop_expected)
0d62e5e8 3906 {
ae13219e 3907 if (debug_threads)
87ce2a04 3908 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3909
0d62e5e8
DJ
3910 return;
3911 }
3912
3913 if (debug_threads)
87ce2a04 3914 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3915
d50171e4 3916 lwp->stop_expected = 1;
bd99dc85 3917 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3918}
3919
df3e4dbe
SM
3920static void
3921send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3922{
d86d4aaf 3923 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3924
7984d532
PA
3925 /* Ignore EXCEPT. */
3926 if (lwp == except)
df3e4dbe 3927 return;
7984d532 3928
02fc4de7 3929 if (lwp->stopped)
df3e4dbe 3930 return;
02fc4de7
PA
3931
3932 send_sigstop (lwp);
7984d532
PA
3933}
3934
3935/* Increment the suspend count of an LWP, and stop it, if not stopped
3936 yet. */
df3e4dbe
SM
3937static void
3938suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3939{
d86d4aaf 3940 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3941
3942 /* Ignore EXCEPT. */
3943 if (lwp == except)
df3e4dbe 3944 return;
7984d532 3945
863d01bd 3946 lwp_suspended_inc (lwp);
7984d532 3947
df3e4dbe 3948 send_sigstop (thread, except);
02fc4de7
PA
3949}
3950
95954743
PA
3951static void
3952mark_lwp_dead (struct lwp_info *lwp, int wstat)
3953{
95954743
PA
3954 /* Store the exit status for later. */
3955 lwp->status_pending_p = 1;
3956 lwp->status_pending = wstat;
3957
00db26fa
PA
3958 /* Store in waitstatus as well, as there's nothing else to process
3959 for this event. */
3960 if (WIFEXITED (wstat))
3961 {
3962 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3963 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3964 }
3965 else if (WIFSIGNALED (wstat))
3966 {
3967 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3968 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3969 }
3970
95954743
PA
3971 /* Prevent trying to stop it. */
3972 lwp->stopped = 1;
3973
3974 /* No further stops are expected from a dead lwp. */
3975 lwp->stop_expected = 0;
3976}
3977
00db26fa
PA
3978/* Return true if LWP has exited already, and has a pending exit event
3979 to report to GDB. */
3980
3981static int
3982lwp_is_marked_dead (struct lwp_info *lwp)
3983{
3984 return (lwp->status_pending_p
3985 && (WIFEXITED (lwp->status_pending)
3986 || WIFSIGNALED (lwp->status_pending)));
3987}
3988
fa96cb38
PA
3989/* Wait for all children to stop for the SIGSTOPs we just queued. */
3990
0d62e5e8 3991static void
fa96cb38 3992wait_for_sigstop (void)
0d62e5e8 3993{
0bfdf32f 3994 struct thread_info *saved_thread;
95954743 3995 ptid_t saved_tid;
fa96cb38
PA
3996 int wstat;
3997 int ret;
0d62e5e8 3998
0bfdf32f
GB
3999 saved_thread = current_thread;
4000 if (saved_thread != NULL)
9c80ecd6 4001 saved_tid = saved_thread->id;
bd99dc85 4002 else
95954743 4003 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 4004
d50171e4 4005 if (debug_threads)
fa96cb38 4006 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 4007
fa96cb38
PA
4008 /* Passing NULL_PTID as filter indicates we want all events to be
4009 left pending. Eventually this returns when there are no
4010 unwaited-for children left. */
4011 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4012 &wstat, __WALL);
4013 gdb_assert (ret == -1);
0d62e5e8 4014
13d3d99b 4015 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 4016 current_thread = saved_thread;
0d62e5e8
DJ
4017 else
4018 {
4019 if (debug_threads)
87ce2a04 4020 debug_printf ("Previously current thread died.\n");
0d62e5e8 4021
f0db101d
PA
4022 /* We can't change the current inferior behind GDB's back,
4023 otherwise, a subsequent command may apply to the wrong
4024 process. */
4025 current_thread = NULL;
0d62e5e8
DJ
4026 }
4027}
4028
fcb056a5 4029/* Returns true if THREAD is stopped in a jump pad, and we can't
fa593d66
PA
4030 move it out, because we need to report the stop event to GDB. For
4031 example, if the user puts a breakpoint in the jump pad, it's
4032 because she wants to debug it. */
4033
fcb056a5
SM
4034static bool
4035stuck_in_jump_pad_callback (thread_info *thread)
fa593d66 4036{
d86d4aaf 4037 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4038
863d01bd
PA
4039 if (lwp->suspended != 0)
4040 {
4041 internal_error (__FILE__, __LINE__,
4042 "LWP %ld is suspended, suspended=%d\n",
4043 lwpid_of (thread), lwp->suspended);
4044 }
fa593d66
PA
4045 gdb_assert (lwp->stopped);
4046
4047 /* Allow debugging the jump pad, gdb_collect, etc.. */
4048 return (supports_fast_tracepoints ()
58b4daa5 4049 && agent_loaded_p ()
fa593d66 4050 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 4051 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 4052 || thread->last_resume_kind == resume_step)
229d26fc
SM
4053 && (linux_fast_tracepoint_collecting (lwp, NULL)
4054 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
4055}
4056
4057static void
9c80ecd6 4058move_out_of_jump_pad_callback (thread_info *thread)
fa593d66 4059{
f0ce0d3a 4060 struct thread_info *saved_thread;
d86d4aaf 4061 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
4062 int *wstat;
4063
863d01bd
PA
4064 if (lwp->suspended != 0)
4065 {
4066 internal_error (__FILE__, __LINE__,
4067 "LWP %ld is suspended, suspended=%d\n",
4068 lwpid_of (thread), lwp->suspended);
4069 }
fa593d66
PA
4070 gdb_assert (lwp->stopped);
4071
f0ce0d3a
PA
4072 /* For gdb_breakpoint_here. */
4073 saved_thread = current_thread;
4074 current_thread = thread;
4075
fa593d66
PA
4076 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4077
4078 /* Allow debugging the jump pad, gdb_collect, etc. */
4079 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 4080 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
4081 && thread->last_resume_kind != resume_step
4082 && maybe_move_out_of_jump_pad (lwp, wstat))
4083 {
4084 if (debug_threads)
87ce2a04 4085 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 4086 lwpid_of (thread));
fa593d66
PA
4087
4088 if (wstat)
4089 {
4090 lwp->status_pending_p = 0;
4091 enqueue_one_deferred_signal (lwp, wstat);
4092
4093 if (debug_threads)
87ce2a04
DE
4094 debug_printf ("Signal %d for LWP %ld deferred "
4095 "(in jump pad)\n",
d86d4aaf 4096 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
4097 }
4098
4099 linux_resume_one_lwp (lwp, 0, 0, NULL);
4100 }
4101 else
863d01bd 4102 lwp_suspended_inc (lwp);
f0ce0d3a
PA
4103
4104 current_thread = saved_thread;
fa593d66
PA
4105}
4106
5a6b0a41
SM
4107static bool
4108lwp_running (thread_info *thread)
fa593d66 4109{
d86d4aaf 4110 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4111
00db26fa 4112 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4113 return false;
4114
4115 return !lwp->stopped;
fa593d66
PA
4116}
4117
7984d532
PA
4118/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4119 If SUSPEND, then also increase the suspend count of every LWP,
4120 except EXCEPT. */
4121
0d62e5e8 4122static void
7984d532 4123stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 4124{
bde24c0a
PA
4125 /* Should not be called recursively. */
4126 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4127
87ce2a04
DE
4128 if (debug_threads)
4129 {
4130 debug_enter ();
4131 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4132 suspend ? "stop-and-suspend" : "stop",
4133 except != NULL
d86d4aaf 4134 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
4135 : "none");
4136 }
4137
bde24c0a
PA
4138 stopping_threads = (suspend
4139 ? STOPPING_AND_SUSPENDING_THREADS
4140 : STOPPING_THREADS);
7984d532
PA
4141
4142 if (suspend)
df3e4dbe
SM
4143 for_each_thread ([&] (thread_info *thread)
4144 {
4145 suspend_and_send_sigstop (thread, except);
4146 });
7984d532 4147 else
df3e4dbe
SM
4148 for_each_thread ([&] (thread_info *thread)
4149 {
4150 send_sigstop (thread, except);
4151 });
4152
fa96cb38 4153 wait_for_sigstop ();
bde24c0a 4154 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4155
4156 if (debug_threads)
4157 {
4158 debug_printf ("stop_all_lwps done, setting stopping_threads "
4159 "back to !stopping\n");
4160 debug_exit ();
4161 }
0d62e5e8
DJ
4162}
4163
863d01bd
PA
4164/* Enqueue one signal in the chain of signals which need to be
4165 delivered to this process on next resume. */
4166
4167static void
4168enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4169{
8d749320 4170 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4171
863d01bd
PA
4172 p_sig->prev = lwp->pending_signals;
4173 p_sig->signal = signal;
4174 if (info == NULL)
4175 memset (&p_sig->info, 0, sizeof (siginfo_t));
4176 else
4177 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4178 lwp->pending_signals = p_sig;
4179}
4180
fa5308bd
AT
4181/* Install breakpoints for software single stepping. */
4182
4183static void
4184install_software_single_step_breakpoints (struct lwp_info *lwp)
4185{
984a2c04
YQ
4186 struct thread_info *thread = get_lwp_thread (lwp);
4187 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4188
4189 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4190
984a2c04 4191 current_thread = thread;
a0ff9e1a 4192 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
fa5308bd 4193
a0ff9e1a 4194 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4195 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4196}
4197
7fe5e27e
AT
4198/* Single step via hardware or software single step.
4199 Return 1 if hardware single stepping, 0 if software single stepping
4200 or can't single step. */
4201
4202static int
4203single_step (struct lwp_info* lwp)
4204{
4205 int step = 0;
4206
4207 if (can_hardware_single_step ())
4208 {
4209 step = 1;
4210 }
4211 else if (can_software_single_step ())
4212 {
4213 install_software_single_step_breakpoints (lwp);
4214 step = 0;
4215 }
4216 else
4217 {
4218 if (debug_threads)
4219 debug_printf ("stepping is not implemented on this target");
4220 }
4221
4222 return step;
4223}
4224
35ac8b3e 4225/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4226 finish a fast tracepoint collect. Since signal can be delivered in
4227 the step-over, the program may go to signal handler and trap again
4228 after return from the signal handler. We can live with the spurious
4229 double traps. */
35ac8b3e
YQ
4230
4231static int
4232lwp_signal_can_be_delivered (struct lwp_info *lwp)
4233{
229d26fc
SM
4234 return (lwp->collecting_fast_tracepoint
4235 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4236}
4237
23f238d3
PA
4238/* Resume execution of LWP. If STEP is nonzero, single-step it. If
4239 SIGNAL is nonzero, give it that signal. */
da6d8c04 4240
ce3a066d 4241static void
23f238d3
PA
4242linux_resume_one_lwp_throw (struct lwp_info *lwp,
4243 int step, int signal, siginfo_t *info)
da6d8c04 4244{
d86d4aaf 4245 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4246 struct thread_info *saved_thread;
82075af2 4247 int ptrace_request;
c06cbd92
YQ
4248 struct process_info *proc = get_thread_process (thread);
4249
4250 /* Note that target description may not be initialised
4251 (proc->tdesc == NULL) at this point because the program hasn't
4252 stopped at the first instruction yet. It means GDBserver skips
4253 the extra traps from the wrapper program (see option --wrapper).
4254 Code in this function that requires register access should be
4255 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 4256
54a0b537 4257 if (lwp->stopped == 0)
0d62e5e8
DJ
4258 return;
4259
65706a29
PA
4260 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4261
229d26fc
SM
4262 fast_tpoint_collect_result fast_tp_collecting
4263 = lwp->collecting_fast_tracepoint;
fa593d66 4264
229d26fc
SM
4265 gdb_assert (!stabilizing_threads
4266 || (fast_tp_collecting
4267 != fast_tpoint_collect_result::not_collecting));
fa593d66 4268
219f2f23
PA
4269 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4270 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 4271 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
4272 {
4273 /* Collecting 'while-stepping' actions doesn't make sense
4274 anymore. */
d86d4aaf 4275 release_while_stepping_state_list (thread);
219f2f23
PA
4276 }
4277
0d62e5e8 4278 /* If we have pending signals or status, and a new signal, enqueue the
35ac8b3e
YQ
4279 signal. Also enqueue the signal if it can't be delivered to the
4280 inferior right now. */
0d62e5e8 4281 if (signal != 0
fa593d66
PA
4282 && (lwp->status_pending_p
4283 || lwp->pending_signals != NULL
35ac8b3e 4284 || !lwp_signal_can_be_delivered (lwp)))
94610ec4
YQ
4285 {
4286 enqueue_pending_signal (lwp, signal, info);
4287
4288 /* Postpone any pending signal. It was enqueued above. */
4289 signal = 0;
4290 }
0d62e5e8 4291
d50171e4
PA
4292 if (lwp->status_pending_p)
4293 {
4294 if (debug_threads)
94610ec4 4295 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
87ce2a04 4296 " has pending status\n",
94610ec4 4297 lwpid_of (thread), step ? "step" : "continue",
87ce2a04 4298 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
4299 return;
4300 }
0d62e5e8 4301
0bfdf32f
GB
4302 saved_thread = current_thread;
4303 current_thread = thread;
0d62e5e8 4304
0d62e5e8
DJ
4305 /* This bit needs some thinking about. If we get a signal that
4306 we must report while a single-step reinsert is still pending,
4307 we often end up resuming the thread. It might be better to
4308 (ew) allow a stack of pending events; then we could be sure that
4309 the reinsert happened right away and not lose any signals.
4310
4311 Making this stack would also shrink the window in which breakpoints are
54a0b537 4312 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
4313 complete correctness, so it won't solve that problem. It may be
4314 worthwhile just to solve this one, however. */
54a0b537 4315 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
4316 {
4317 if (debug_threads)
87ce2a04
DE
4318 debug_printf (" pending reinsert at 0x%s\n",
4319 paddress (lwp->bp_reinsert));
d50171e4 4320
85e00e85 4321 if (can_hardware_single_step ())
d50171e4 4322 {
229d26fc 4323 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
4324 {
4325 if (step == 0)
9986ba08 4326 warning ("BAD - reinserting but not stepping.");
fa593d66 4327 if (lwp->suspended)
9986ba08
PA
4328 warning ("BAD - reinserting and suspended(%d).",
4329 lwp->suspended);
fa593d66 4330 }
d50171e4 4331 }
f79b145d
YQ
4332
4333 step = maybe_hw_step (thread);
0d62e5e8
DJ
4334 }
4335
229d26fc 4336 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
fa593d66
PA
4337 {
4338 if (debug_threads)
87ce2a04
DE
4339 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4340 " (exit-jump-pad-bkpt)\n",
d86d4aaf 4341 lwpid_of (thread));
fa593d66 4342 }
229d26fc 4343 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
fa593d66
PA
4344 {
4345 if (debug_threads)
87ce2a04
DE
4346 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4347 " single-stepping\n",
d86d4aaf 4348 lwpid_of (thread));
fa593d66
PA
4349
4350 if (can_hardware_single_step ())
4351 step = 1;
4352 else
38e08fca
GB
4353 {
4354 internal_error (__FILE__, __LINE__,
4355 "moving out of jump pad single-stepping"
4356 " not implemented on this target");
4357 }
fa593d66
PA
4358 }
4359
219f2f23
PA
4360 /* If we have while-stepping actions in this thread set it stepping.
4361 If we have a signal to deliver, it may or may not be set to
4362 SIG_IGN, we don't know. Assume so, and allow collecting
4363 while-stepping into a signal handler. A possible smart thing to
4364 do would be to set an internal breakpoint at the signal return
4365 address, continue, and carry on catching this while-stepping
4366 action only when that breakpoint is hit. A future
4367 enhancement. */
7fe5e27e 4368 if (thread->while_stepping != NULL)
219f2f23
PA
4369 {
4370 if (debug_threads)
87ce2a04 4371 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 4372 lwpid_of (thread));
7fe5e27e
AT
4373
4374 step = single_step (lwp);
219f2f23
PA
4375 }
4376
c06cbd92 4377 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
0d62e5e8 4378 {
0bfdf32f 4379 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
4380
4381 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4382
4383 if (debug_threads)
4384 {
4385 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4386 (long) lwp->stop_pc);
4387 }
0d62e5e8
DJ
4388 }
4389
35ac8b3e
YQ
4390 /* If we have pending signals, consume one if it can be delivered to
4391 the inferior. */
4392 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
0d62e5e8
DJ
4393 {
4394 struct pending_signals **p_sig;
4395
54a0b537 4396 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
4397 while ((*p_sig)->prev != NULL)
4398 p_sig = &(*p_sig)->prev;
4399
4400 signal = (*p_sig)->signal;
32ca6d61 4401 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 4402 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4403 &(*p_sig)->info);
32ca6d61 4404
0d62e5e8
DJ
4405 free (*p_sig);
4406 *p_sig = NULL;
4407 }
4408
94610ec4
YQ
4409 if (debug_threads)
4410 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4411 lwpid_of (thread), step ? "step" : "continue", signal,
4412 lwp->stop_expected ? "expected" : "not expected");
4413
aa5ca48f
DE
4414 if (the_low_target.prepare_to_resume != NULL)
4415 the_low_target.prepare_to_resume (lwp);
4416
d86d4aaf 4417 regcache_invalidate_thread (thread);
da6d8c04 4418 errno = 0;
54a0b537 4419 lwp->stepping = step;
82075af2
JS
4420 if (step)
4421 ptrace_request = PTRACE_SINGLESTEP;
4422 else if (gdb_catching_syscalls_p (lwp))
4423 ptrace_request = PTRACE_SYSCALL;
4424 else
4425 ptrace_request = PTRACE_CONT;
4426 ptrace (ptrace_request,
4427 lwpid_of (thread),
b8e1b30e 4428 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4429 /* Coerce to a uintptr_t first to avoid potential gcc warning
4430 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4431 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4432
0bfdf32f 4433 current_thread = saved_thread;
da6d8c04 4434 if (errno)
23f238d3
PA
4435 perror_with_name ("resuming thread");
4436
4437 /* Successfully resumed. Clear state that no longer makes sense,
4438 and mark the LWP as running. Must not do this before resuming
4439 otherwise if that fails other code will be confused. E.g., we'd
4440 later try to stop the LWP and hang forever waiting for a stop
4441 status. Note that we must not throw after this is cleared,
4442 otherwise handle_zombie_lwp_error would get confused. */
4443 lwp->stopped = 0;
4444 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4445}
4446
4447/* Called when we try to resume a stopped LWP and that errors out. If
4448 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4449 or about to become), discard the error, clear any pending status
4450 the LWP may have, and return true (we'll collect the exit status
4451 soon enough). Otherwise, return false. */
4452
4453static int
4454check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4455{
4456 struct thread_info *thread = get_lwp_thread (lp);
4457
4458 /* If we get an error after resuming the LWP successfully, we'd
4459 confuse !T state for the LWP being gone. */
4460 gdb_assert (lp->stopped);
4461
4462 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4463 because even if ptrace failed with ESRCH, the tracee may be "not
4464 yet fully dead", but already refusing ptrace requests. In that
4465 case the tracee has 'R (Running)' state for a little bit
4466 (observed in Linux 3.18). See also the note on ESRCH in the
4467 ptrace(2) man page. Instead, check whether the LWP has any state
4468 other than ptrace-stopped. */
4469
4470 /* Don't assume anything if /proc/PID/status can't be read. */
4471 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4472 {
23f238d3
PA
4473 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4474 lp->status_pending_p = 0;
4475 return 1;
4476 }
4477 return 0;
4478}
4479
4480/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4481 disappears while we try to resume it. */
3221518c 4482
23f238d3
PA
4483static void
4484linux_resume_one_lwp (struct lwp_info *lwp,
4485 int step, int signal, siginfo_t *info)
4486{
a70b8144 4487 try
23f238d3
PA
4488 {
4489 linux_resume_one_lwp_throw (lwp, step, signal, info);
4490 }
230d2906 4491 catch (const gdb_exception_error &ex)
23f238d3
PA
4492 {
4493 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4494 throw;
3221518c 4495 }
da6d8c04
DJ
4496}
4497
5fdda392
SM
4498/* This function is called once per thread via for_each_thread.
4499 We look up which resume request applies to THREAD and mark it with a
4500 pointer to the appropriate resume request.
5544ad89
DJ
4501
4502 This algorithm is O(threads * resume elements), but resume elements
4503 is small (and will remain small at least until GDB supports thread
4504 suspension). */
ebcf782c 4505
5fdda392
SM
4506static void
4507linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4508{
d86d4aaf 4509 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4510
5fdda392 4511 for (int ndx = 0; ndx < n; ndx++)
95954743 4512 {
5fdda392 4513 ptid_t ptid = resume[ndx].thread;
d7e15655 4514 if (ptid == minus_one_ptid
9c80ecd6 4515 || ptid == thread->id
0c9070b3
YQ
4516 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4517 of PID'. */
e99b03dc 4518 || (ptid.pid () == pid_of (thread)
0e998d96 4519 && (ptid.is_pid ()
e38504b3 4520 || ptid.lwp () == -1)))
95954743 4521 {
5fdda392 4522 if (resume[ndx].kind == resume_stop
8336d594 4523 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4524 {
4525 if (debug_threads)
87ce2a04
DE
4526 debug_printf ("already %s LWP %ld at GDB's request\n",
4527 (thread->last_status.kind
4528 == TARGET_WAITKIND_STOPPED)
4529 ? "stopped"
4530 : "stopping",
d86d4aaf 4531 lwpid_of (thread));
d50171e4
PA
4532
4533 continue;
4534 }
4535
5a04c4cf
PA
4536 /* Ignore (wildcard) resume requests for already-resumed
4537 threads. */
5fdda392 4538 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4539 && thread->last_resume_kind != resume_stop)
4540 {
4541 if (debug_threads)
4542 debug_printf ("already %s LWP %ld at GDB's request\n",
4543 (thread->last_resume_kind
4544 == resume_step)
4545 ? "stepping"
4546 : "continuing",
4547 lwpid_of (thread));
4548 continue;
4549 }
4550
4551 /* Don't let wildcard resumes resume fork children that GDB
4552 does not yet know are new fork children. */
4553 if (lwp->fork_relative != NULL)
4554 {
5a04c4cf
PA
4555 struct lwp_info *rel = lwp->fork_relative;
4556
4557 if (rel->status_pending_p
4558 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4559 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4560 {
4561 if (debug_threads)
4562 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4563 lwpid_of (thread));
4564 continue;
4565 }
4566 }
4567
4568 /* If the thread has a pending event that has already been
4569 reported to GDBserver core, but GDB has not pulled the
4570 event out of the vStopped queue yet, likewise, ignore the
4571 (wildcard) resume request. */
9c80ecd6 4572 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4573 {
4574 if (debug_threads)
4575 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4576 lwpid_of (thread));
4577 continue;
4578 }
4579
5fdda392 4580 lwp->resume = &resume[ndx];
8336d594 4581 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4582
c2d6af84
PA
4583 lwp->step_range_start = lwp->resume->step_range_start;
4584 lwp->step_range_end = lwp->resume->step_range_end;
4585
fa593d66
PA
4586 /* If we had a deferred signal to report, dequeue one now.
4587 This can happen if LWP gets more than one signal while
4588 trying to get out of a jump pad. */
4589 if (lwp->stopped
4590 && !lwp->status_pending_p
4591 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4592 {
4593 lwp->status_pending_p = 1;
4594
4595 if (debug_threads)
87ce2a04
DE
4596 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4597 "leaving status pending.\n",
d86d4aaf
DE
4598 WSTOPSIG (lwp->status_pending),
4599 lwpid_of (thread));
fa593d66
PA
4600 }
4601
5fdda392 4602 return;
95954743
PA
4603 }
4604 }
2bd7c093
PA
4605
4606 /* No resume action for this thread. */
4607 lwp->resume = NULL;
5544ad89
DJ
4608}
4609
8f86d7aa
SM
4610/* find_thread callback for linux_resume. Return true if this lwp has an
4611 interesting status pending. */
5544ad89 4612
25c28b4d
SM
4613static bool
4614resume_status_pending_p (thread_info *thread)
5544ad89 4615{
d86d4aaf 4616 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4617
bd99dc85
PA
4618 /* LWPs which will not be resumed are not interesting, because
4619 we might not wait for them next time through linux_wait. */
2bd7c093 4620 if (lwp->resume == NULL)
25c28b4d 4621 return false;
64386c31 4622
25c28b4d 4623 return thread_still_has_status_pending_p (thread);
d50171e4
PA
4624}
4625
/* find_thread callback: return true if this lwp that GDB wants
   running is stopped at an internal breakpoint that we need to step
   over.  It assumes that any required STOP_PC adjustment has already
   been propagated to the inferior's regcache.  */

static bool
need_step_over_p (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  /* The client asked this thread to stay stopped; nothing to step.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (can_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* The breakpoint queries below operate on the current thread, so
     temporarily switch to THREAD.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4761
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped by either hardware or software while the breakpoint
   is temporarily uninserted from the inferior.  When the single-step
   finishes, we reinsert the breakpoint, and let all threads that are
   supposed to be running, run again.  Returns 1 (the step-over was
   started); the matching finish_step_over undoes the state set up
   here.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Stop every thread; LWP is passed as the "except" argument.  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert the breakpoint once the step is done
     (see finish_step_over), then lift it out of the inferior.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
  return 1;
}
4819
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any single-step
   breakpoints we've set, on non hardware single-step targets.
   Returns 1 if a step-over was in fact pending (lwp->bp_reinsert was
   set), 0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Breakpoint operations below act on the current thread.  */
      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return 1;
    }
  else
    return 0;
}
4861
863d01bd
PA
/* If there's a step over in progress, wait until all threads stop
   (that is, until the stepping thread finishes its step), and
   unsuspend all lwps.  The stepping thread ends with its status
   pending, which is processed later when we get back to processing
   events.  */

static void
complete_ongoing_step_over (void)
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
					   &wstat, __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      /* NOTE(review): LWP may be NULL here (stepping lwp no longer
	 found); presumably unsuspend_all_lwps (NULL) then unsuspends
	 every lwp with no exception — confirm against its definition.  */
      unsuspend_all_lwps (lwp);
    }
}
4894
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   LEAVE_ALL_STOPPED is set when the caller decided no thread should
   actually run (e.g., some thread has a pending status to report).

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static void
linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume action recorded for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}
5011
0e4d7e35
TBA
/* Apply the resume requests in RESUME_INFO (N entries) to all
   threads: record each thread's matching request, decide whether any
   thread must stay stopped (pending status, or a needed step-over),
   then resume/stop/step each thread accordingly.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Match each thread against the N requests and record the chosen
     one in the thread's lwp.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread (resume_status_pending_p) != NULL;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over = find_thread (need_step_over_p);

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
5081
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   EXCEPT, if non-NULL, is an lwp to skip entirely.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static void
proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  /* The client asked this thread to stop and we've already reported
     the stop: leave it stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (can_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* A step-over for this lwp is still pending; keep stepping.  */
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  linux_resume_one_lwp (lwp, step, 0, NULL);
}
5187
e2b44075
SM
5188static void
5189unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
7984d532 5190{
d86d4aaf 5191 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5192
5193 if (lwp == except)
e2b44075 5194 return;
7984d532 5195
863d01bd 5196 lwp_suspended_decr (lwp);
7984d532 5197
e2b44075 5198 proceed_one_lwp (thread, except);
d50171e4
PA
5199}
5200
5201/* When we finish a step-over, set threads running again. If there's
5202 another thread that may need a step-over, now's the time to start
5203 it. Eventually, we'll move all threads past their breakpoints. */
5204
5205static void
5206proceed_all_lwps (void)
5207{
d86d4aaf 5208 struct thread_info *need_step_over;
d50171e4
PA
5209
5210 /* If there is a thread which would otherwise be resumed, which is
5211 stopped at a breakpoint that needs stepping over, then don't
5212 resume any threads - have it step over the breakpoint with all
5213 other threads stopped, then resume all threads again. */
5214
5215 if (supports_breakpoints ())
5216 {
eca55aec 5217 need_step_over = find_thread (need_step_over_p);
d50171e4
PA
5218
5219 if (need_step_over != NULL)
5220 {
5221 if (debug_threads)
87ce2a04
DE
5222 debug_printf ("proceed_all_lwps: found "
5223 "thread %ld needing a step-over\n",
5224 lwpid_of (need_step_over));
d50171e4 5225
d86d4aaf 5226 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
5227 return;
5228 }
5229 }
5544ad89 5230
d50171e4 5231 if (debug_threads)
87ce2a04 5232 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 5233
e2b44075
SM
5234 for_each_thread ([] (thread_info *thread)
5235 {
5236 proceed_one_lwp (thread, NULL);
5237 });
d50171e4
PA
5238}
5239
5240/* Stopped LWPs that the client wanted to be running, that don't have
5241 pending statuses, are set to run again, except for EXCEPT, if not
5242 NULL. This undoes a stop_all_lwps call. */
5243
5244static void
7984d532 5245unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 5246{
5544ad89
DJ
5247 if (debug_threads)
5248 {
87ce2a04 5249 debug_enter ();
d50171e4 5250 if (except)
87ce2a04 5251 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5252 lwpid_of (get_lwp_thread (except)));
5544ad89 5253 else
87ce2a04 5254 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5255 }
5256
7984d532 5257 if (unsuspend)
e2b44075
SM
5258 for_each_thread ([&] (thread_info *thread)
5259 {
5260 unsuspend_and_proceed_one_lwp (thread, except);
5261 });
7984d532 5262 else
e2b44075
SM
5263 for_each_thread ([&] (thread_info *thread)
5264 {
5265 proceed_one_lwp (thread, except);
5266 });
87ce2a04
DE
5267
5268 if (debug_threads)
5269 {
5270 debug_printf ("unstop_all_lwps done\n");
5271 debug_exit ();
5272 }
0d62e5e8
DJ
5273}
5274
58caa3dc
DJ
5275
5276#ifdef HAVE_LINUX_REGSETS
5277
1faeff08
MR
5278#define use_linux_regsets 1
5279
030031ee
PA
5280/* Returns true if REGSET has been disabled. */
5281
5282static int
5283regset_disabled (struct regsets_info *info, struct regset_info *regset)
5284{
5285 return (info->disabled_regsets != NULL
5286 && info->disabled_regsets[regset - info->regsets]);
5287}
5288
5289/* Disable REGSET. */
5290
5291static void
5292disable_regset (struct regsets_info *info, struct regset_info *regset)
5293{
5294 int dr_offset;
5295
5296 dr_offset = regset - info->regsets;
5297 if (info->disabled_regsets == NULL)
224c3ddb 5298 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5299 info->disabled_regsets[dr_offset] = 1;
5300}
5301
58caa3dc 5302static int
3aee8918
PA
5303regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5304 struct regcache *regcache)
58caa3dc
DJ
5305{
5306 struct regset_info *regset;
e9d25b98 5307 int saw_general_regs = 0;
95954743 5308 int pid;
1570b33e 5309 struct iovec iov;
58caa3dc 5310
0bfdf32f 5311 pid = lwpid_of (current_thread);
28eef672 5312 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5313 {
1570b33e
L
5314 void *buf, *data;
5315 int nt_type, res;
58caa3dc 5316
030031ee 5317 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5318 continue;
58caa3dc 5319
bca929d3 5320 buf = xmalloc (regset->size);
1570b33e
L
5321
5322 nt_type = regset->nt_type;
5323 if (nt_type)
5324 {
5325 iov.iov_base = buf;
5326 iov.iov_len = regset->size;
5327 data = (void *) &iov;
5328 }
5329 else
5330 data = buf;
5331
dfb64f85 5332#ifndef __sparc__
f15f9948 5333 res = ptrace (regset->get_request, pid,
b8e1b30e 5334 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5335#else
1570b33e 5336 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5337#endif
58caa3dc
DJ
5338 if (res < 0)
5339 {
1ef53e6b
AH
5340 if (errno == EIO
5341 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5342 {
1ef53e6b
AH
5343 /* If we get EIO on a regset, or an EINVAL and the regset is
5344 optional, do not try it again for this process mode. */
030031ee 5345 disable_regset (regsets_info, regset);
58caa3dc 5346 }
e5a9158d
AA
5347 else if (errno == ENODATA)
5348 {
5349 /* ENODATA may be returned if the regset is currently
5350 not "active". This can happen in normal operation,
5351 so suppress the warning in this case. */
5352 }
fcd4a73d
YQ
5353 else if (errno == ESRCH)
5354 {
5355 /* At this point, ESRCH should mean the process is
5356 already gone, in which case we simply ignore attempts
5357 to read its registers. */
5358 }
58caa3dc
DJ
5359 else
5360 {
0d62e5e8 5361 char s[256];
95954743
PA
5362 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5363 pid);
0d62e5e8 5364 perror (s);
58caa3dc
DJ
5365 }
5366 }
098dbe61
AA
5367 else
5368 {
5369 if (regset->type == GENERAL_REGS)
5370 saw_general_regs = 1;
5371 regset->store_function (regcache, buf);
5372 }
fdeb2a12 5373 free (buf);
58caa3dc 5374 }
e9d25b98
DJ
5375 if (saw_general_regs)
5376 return 0;
5377 else
5378 return 1;
58caa3dc
DJ
5379}
5380
/* Write REGCACHE's values back to the current thread's lwp for every
   enabled regset in REGSETS_INFO that has a fill_function, using a
   read-modify-write cycle per regset.  Returns 0 if a GENERAL_REGS
   regset was written (or the process is already gone), 1
   otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill_function) regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5469
1faeff08 5470#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5471
1faeff08 5472#define use_linux_regsets 0
3aee8918
PA
5473#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5474#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5475
58caa3dc 5476#endif
1faeff08
MR
5477
5478/* Return 1 if register REGNO is supported by one of the regset ptrace
5479 calls or 0 if it has to be transferred individually. */
5480
5481static int
3aee8918 5482linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5483{
5484 unsigned char mask = 1 << (regno % 8);
5485 size_t index = regno / 8;
5486
5487 return (use_linux_regsets
3aee8918
PA
5488 && (regs_info->regset_bitmap == NULL
5489 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5490}
5491
58caa3dc 5492#ifdef HAVE_LINUX_USRREGS
1faeff08 5493
5b3da067 5494static int
3aee8918 5495register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5496{
5497 int addr;
5498
3aee8918 5499 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5500 error ("Invalid register number %d.", regnum);
5501
3aee8918 5502 addr = usrregs->regmap[regnum];
1faeff08
MR
5503
5504 return addr;
5505}
5506
/* Fetch one register, REGNO, from the inferior into REGCACHE using
   PTRACE_PEEKUSER, one PTRACE_XFER_TYPE-sized chunk at a time.
   Registers the low target cannot fetch, or with no USR-area offset,
   are silently skipped; on a failed peek the register is supplied as
   unavailable.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  /* Let the low target massage the raw bytes if it needs to
     (e.g. for registers stored in a target-specific layout).  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5554
/* Store one register, REGNO, from REGCACHE into the inferior's user
   area via PTRACE_POKEUSER.  Registers the port declares unstorable,
   or that have no user-area offset, are silently skipped.  ESRCH
   from ptrace (process already gone) is ignored.  */
static void
store_register (const struct usrregs_info *usrregs,
                struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; the tail padding is zero-filled below.  */
  size = ((register_size (regcache->tdesc, regno)
           + sizeof (PTRACE_XFER_TYPE) - 1)
          & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  /* Give the port a chance to pre-process the regcache bytes
     (e.g. fix layout differences) before they are written out.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
              /* Coerce to a uintptr_t first to avoid potential gcc warning
                 about coercing an 8 byte integer to a 4 byte pointer.  */
              (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
              (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
        {
          /* At this point, ESRCH should mean the process is
             already gone, in which case we simply ignore attempts
             to change its registers.  See also the related
             comment in linux_resume_one_lwp.  */
          if (errno == ESRCH)
            return;

          if ((*the_low_target.cannot_store_register) (regno) == 0)
            error ("writing register %d: %s", regno, safe_strerror (errno));
        }
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5609
/* Fetch all registers, or just one, from the child process via the
   user area (PTRACE_PEEKUSER).
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been retrieved by regsets_fetch_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_fetch_inferior_registers (const struct regs_info *regs_info,
                              struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      /* With ALL clear, only fetch registers that no regset covers.  */
      for (regno = 0; regno < usr->num_regs; regno++)
        if (all || !linux_register_in_regsets (regs_info, regno))
          fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
}
5630
/* Store our register values back into the inferior via the user area
   (PTRACE_POKEUSER).
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been saved by regsets_store_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (const struct regs_info *regs_info,
                              struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      /* With ALL clear, only store registers that no regset covers.  */
      for (regno = 0; regno < usr->num_regs; regno++)
        if (all || !linux_register_in_regsets (regs_info, regno))
          store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
}
5651
5652#else /* !HAVE_LINUX_USRREGS */
5653
3aee8918
PA
5654#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5655#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 5656
58caa3dc 5657#endif
1faeff08
MR
5658
5659
a5a4d4cd
TBA
/* Fetch registers (all of them if REGNO is -1) from the inferior into
   REGCACHE.  Tries the port-specific fetch_register hook first, then
   regsets, and finally the PTRACE_PEEKUSER area for anything left.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the port's hook a first shot at every user register; it
         may implement a special path for some of them.  */
      if (the_low_target.fetch_register != NULL
          && regs_info->usrregs != NULL)
        for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
          (*the_low_target.fetch_register) (regcache, regno);

      /* ALL is nonzero when some regset failed, meaning usrregs must
         also fetch registers normally covered by regsets.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
        usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A nonzero hook return means the register was fully handled.  */
      if (the_low_target.fetch_register != NULL
          && (*the_low_target.fetch_register) (regcache, regno))
        return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
        all = regsets_fetch_inferior_registers (regs_info->regsets_info,
                                                regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
        usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5692
a5a4d4cd
TBA
/* Store registers (all of them if REGNO is -1) from REGCACHE back
   into the inferior, via regsets where possible and PTRACE_POKEUSER
   otherwise.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* ALL is nonzero when some regset failed, meaning usrregs must
         also store registers normally covered by regsets.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
                                              regcache);
      if (regs_info->usrregs != NULL)
        usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
        all = regsets_store_inferior_registers (regs_info->regsets_info,
                                                regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
        usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5717
da6d8c04 5718
/* A wrapper for the read_memory target op, usable as a plain C-style
   callback.  Forwards to the current process target.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->pt->read_memory (memaddr, myaddr, len);
}
5726
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success,
   or an errno value on failure.  Tries the fast /proc/PID/mem
   interface first and falls back to word-at-a-time ptrace.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
                                   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
         thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
        goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
         supports it (only one syscall), and it's 64-bit safe even on
         32-bit platforms (for instance, SPARC debugging a SPARC64
         application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
        bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
        return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
        {
          memaddr += bytes;
          myaddr += bytes;
          len -= bytes;
        }
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
           / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
         about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
                          (PTRACE_TYPE_ARG3) (uintptr_t) addr,
                          (PTRACE_TYPE_ARG4) 0);
      if (errno)
        break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  Note that I, at this
     point, counts whole words read, including a possibly partial
     leading word.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
              (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
              i < len ? i : len);
    }

  return ret;
}
5815
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.
   Writing is done with PTRACE_POKETEXT, a whole word at a time, so
   the partial words at both ends are first read back to preserve
   their untouched bytes.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
                                    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
        {
          sprintf (p, "%02x", myaddr[i]);
          p += 2;
        }
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
                    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
                      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
                      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
        = ptrace (PTRACE_PEEKTEXT, pid,
                  /* Coerce to a uintptr_t first to avoid potential gcc warning
                     about coercing an 8 byte integer to a 4 byte pointer.  */
                  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
                                                  * sizeof (PTRACE_XFER_TYPE)),
                  (PTRACE_TYPE_ARG4) 0);
      if (errno)
        return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
          myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
              /* Coerce to a uintptr_t first to avoid potential gcc warning
                 about coercing an 8 byte integer to a 4 byte pointer.  */
              (PTRACE_TYPE_ARG3) (uintptr_t) addr,
              (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
        return errno;
    }

  return 0;
}
2f2893d9 5907
2a31c7aa
TBA
/* Initialize libthread_db-based thread debugging for the current
   process, if it isn't initialized already.  A no-op without
   USE_THREAD_DB.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5920
eb497a2a
TBA
/* Interrupt the inferior.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  Note the negated pid:
     kill(2) with a negative argument signals the whole group.  */
  ::kill (-signal_pid, SIGINT);
}
5928
eac215cc
TBA
/* The auxiliary vector is always readable via /proc on GNU/Linux.  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5934
aa691b87
RM
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  Reads /proc/PID/auxv.
   Returns the number of bytes read, or -1 on failure.  */

int
linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
                                 unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_thread);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
5962
d993e290
PA
5963/* These breakpoint and watchpoint related wrapper functions simply
5964 pass on the function call if the target has registered a
5965 corresponding function. */
e013ee27 5966
a2b2297a
TBA
5967bool
5968linux_process_target::supports_z_point_type (char z_type)
802e8e6d
PA
5969{
5970 return (the_low_target.supports_z_point_type != NULL
5971 && the_low_target.supports_z_point_type (z_type));
5972}
5973
7e0bde70
TBA
5974int
5975linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5976 int size, raw_breakpoint *bp)
e013ee27 5977{
c8f4bfdd
YQ
5978 if (type == raw_bkpt_type_sw)
5979 return insert_memory_breakpoint (bp);
5980 else if (the_low_target.insert_point != NULL)
802e8e6d 5981 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5982 else
5983 /* Unsupported (see target.h). */
5984 return 1;
5985}
5986
7e0bde70
TBA
5987int
5988linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5989 int size, raw_breakpoint *bp)
e013ee27 5990{
c8f4bfdd
YQ
5991 if (type == raw_bkpt_type_sw)
5992 return remove_memory_breakpoint (bp);
5993 else if (the_low_target.remove_point != NULL)
802e8e6d 5994 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5995 else
5996 /* Unsupported (see target.h). */
5997 return 1;
5998}
5999
84320c4e 6000/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
6001 method. */
6002
84320c4e
TBA
6003bool
6004linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
6005{
6006 struct lwp_info *lwp = get_thread_lwp (current_thread);
6007
6008 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6009}
6010
/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  Available exactly when siginfo-based SIGTRAP
   classification is available.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
6019
93fe88b2 6020/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
6021 method. */
6022
93fe88b2
TBA
6023bool
6024linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
6025{
6026 struct lwp_info *lwp = get_thread_lwp (current_thread);
6027
6028 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6029}
6030
/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  Available exactly when siginfo-based SIGTRAP
   classification is available.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
6039
/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return can_hardware_single_step ();
}
6047
7d00775e
AT
/* Return nonzero if this target can single-step in software,
   i.e. by planting breakpoints at successor instructions.  */

static int
linux_supports_software_single_step (void)
{
  return can_software_single_step ();
}
6053
6eeb5c55
TBA
6054bool
6055linux_process_target::stopped_by_watchpoint ()
e013ee27 6056{
0bfdf32f 6057 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 6058
15c66dd6 6059 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
6060}
6061
6eeb5c55
TBA
6062CORE_ADDR
6063linux_process_target::stopped_data_address ()
e013ee27 6064{
0bfdf32f 6065 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
6066
6067 return lwp->stopped_data_address;
e013ee27
OF
6068}
6069
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
6084
52fb6437
NS
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the text and data load addresses out of
   the inferior's user area with PTRACE_PEEKUSER.  Returns 1 on
   success, 0 on failure.  Must only be called when
   supports_read_offsets () is true.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
                 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
                     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
                 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
         used by gdb) are relative to the beginning of the program,
         with the data segment immediately following the text segment.
         However, the actual runtime layout in memory may put the data
         somewhere else, so when we send gdb a data base-address, we
         use the real data base address and subtract the compile-time
         data base-address from it (which is just the length of the
         text segment).  BSS immediately follows data in both
         cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 6125
6e3fd7e9
TBA
/* TLS address lookup is implemented via libthread_db, so it is only
   available when gdbserver was built with thread_db support.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
6135
/* Compute the thread-local storage address of OFFSET within
   LOAD_MODULE for THREAD, storing it in *ADDRESS.  Returns 0 on
   success; any other value is a TLS error code (or -1 without
   thread_db support).  */

int
linux_process_target::get_tls_address (thread_info *thread,
                                       CORE_ADDR offset,
                                       CORE_ADDR load_module,
                                       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
6148
07e059b5
VP
/* Handle qXfer:osdata reads.  WRITEBUF is accepted for interface
   compatibility but writing osdata is not supported; the read is
   forwarded to the shared linux-osdata code.  */

static int
linux_qxfer_osdata (const char *annex,
                    unsigned char *readbuf, unsigned const char *writebuf,
                    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
6156
d0722149
DE
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  DIRECTION 1 converts from
   the inferior layout into SIGINFO; 0 converts from SIGINFO into the
   inferior layout.  */

static void
siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
{
  int done = 0;

  /* Ports with layout differences (e.g. 64-bit gdbserver debugging a
     32-bit inferior) provide a fixup hook.  */
  if (the_low_target.siginfo_fixup != NULL)
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
        memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
        memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}
6178
4aa995e1
PA
/* Handle qXfer:siginfo reads and writes for the current thread.
   Reads fetch the signal information with PTRACE_GETSIGINFO; writes
   read-modify-write it back with PTRACE_SETSIGINFO.  Returns the
   number of bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
                  readbuf != NULL ? "Reading" : "Writing",
                  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
        return -1;
    }

  return len;
}
6227
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: a signal handler must not clobber the
     interrupted code's errno.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
        {
          /* Use the async signal safe debug function.  */
          if (debug_write ("sigchld_handler\n",
                           sizeof ("sigchld_handler\n") - 1) < 0)
            break; /* just ignore */
        } while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6253
/* Non-stop mode is always supported on GNU/Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
6259
/* Enable or disable async (non-stop) operation, registering or
   removing the SIGCHLD event pipe with the event loop accordingly.
   Returns the previous async state, which is also returned
   unchanged when enabling fails.  */

static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
                  enable, previous);

  if (previous != enable)
    {
      /* Block SIGCHLD while the event pipe is being (dis)connected so
         the handler never sees a half-initialized pipe.  */
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
        {
          if (pipe (linux_event_pipe) == -1)
            {
              linux_event_pipe[0] = -1;
              linux_event_pipe[1] = -1;
              gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

              warning ("creating event pipe failed.");
              return previous;
            }

          fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

          /* Register the event loop handler.  */
          add_file_handler (linux_event_pipe[0],
                            handle_target_event, NULL);

          /* Always trigger a linux_wait.  */
          async_file_mark ();
        }
      else
        {
          delete_file_handler (linux_event_pipe[0]);

          close (linux_event_pipe[0]);
          close (linux_event_pipe[1]);
          linux_event_pipe[0] = -1;
          linux_event_pipe[1] = -1;
        }

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6314
/* Switch non-stop mode on or off.  Returns 0 on success, -1 if the
   async state could not be changed accordingly.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  /* linux_async returns the previous state; verify the new state
     actually took effect.  */
  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
6326
cf8fd78b
PA
/* Debugging multiple processes at once is always supported.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
6332
89245bc0
DB
/* Check if fork events are supported.  They require kernel support
   for PTRACE_O_TRACEFORK.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
6340
/* Check if vfork events are supported.  Same kernel requirement as
   fork events.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
6348
94585166
DB
/* Check if exec events are supported.  They require kernel support
   for PTRACE_O_TRACEEXEC.  */

static int
linux_supports_exec_events (void)
{
  return linux_supports_traceexec ();
}
6356
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

static void
linux_handle_new_gdb_connection (void)
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
        {
          /* Stop the lwp so we can modify its ptrace options.  The
             flag is picked up when the stop is processed.  */
          lwp->must_set_ptrace_flags = 1;
          linux_stop_lwp (lwp);
        }
      else
        {
          /* Already stopped; go ahead and set the ptrace options.  */
          struct process_info *proc = find_process_pid (pid_of (thread));
          int options = linux_low_ptrace_options (proc->attached);

          linux_enable_event_reporting (lwpid_of (thread), options);
          lwp->must_set_ptrace_flags = 0;
        }
    });
}
6386
03583c20
UW
/* Address-space randomization can be disabled only when personality(2)
   support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 6396
d1feda86
YQ
/* The in-process agent is always supported on GNU/Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
6402
c2d6af84
PA
/* Return nonzero if the vCont;r range-stepping action is supported:
   either via software single-step, or via a port-provided hook.  */

static int
linux_supports_range_stepping (void)
{
  if (can_software_single_step ())
    return 1;
  if (*the_low_target.supports_range_stepping == NULL)
    return 0;

  return (*the_low_target.supports_range_stepping) ();
}
6413
723b724b 6414#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6415struct target_loadseg
6416{
6417 /* Core address to which the segment is mapped. */
6418 Elf32_Addr addr;
6419 /* VMA recorded in the program header. */
6420 Elf32_Addr p_vaddr;
6421 /* Size of this segment in memory. */
6422 Elf32_Word p_memsz;
6423};
6424
723b724b 6425# if defined PT_GETDSBT
78d85199
YQ
6426struct target_loadmap
6427{
6428 /* Protocol version number, must be zero. */
6429 Elf32_Word version;
6430 /* Pointer to the DSBT table, its size, and the DSBT index. */
6431 unsigned *dsbt_table;
6432 unsigned dsbt_size, dsbt_index;
6433 /* Number of segments in this map. */
6434 Elf32_Word nsegs;
6435 /* The actual memory map. */
6436 struct target_loadseg segs[/*nsegs*/];
6437};
723b724b
MF
6438# define LINUX_LOADMAP PT_GETDSBT
6439# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6440# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6441# else
6442struct target_loadmap
6443{
6444 /* Protocol version number, must be zero. */
6445 Elf32_Half version;
6446 /* Number of segments in this map. */
6447 Elf32_Half nsegs;
6448 /* The actual memory map. */
6449 struct target_loadseg segs[/*nsegs*/];
6450};
6451# define LINUX_LOADMAP PTRACE_GETFDPIC
6452# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6453# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6454# endif
78d85199 6455
78d85199
YQ
/* Read a portion of the load map for ANNEX ("exec" or "interp") into
   MYADDR, starting at OFFSET, at most LEN bytes.  Obtains the map
   from the kernel with the PT_GETDSBT/PTRACE_GETFDPIC request.
   Returns the number of bytes copied, or -1 on error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
                    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): if CORE_ADDR is unsigned, the `offset < 0' arm can
     never fire — confirm CORE_ADDR's signedness before relying on it.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b
MF
6488#else
6489# define linux_read_loadmap NULL
6490#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6491
/* Let the port inspect the qSupported feature list (COUNT entries in
   FEATURES), if it provides a hook for that.  */

static void
linux_process_qsupported (char **features, int count)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (features, count);
}
6498
82075af2
JS
6499static int
6500linux_supports_catch_syscall (void)
6501{
6502 return (the_low_target.get_syscall_trapinfo != NULL
6503 && linux_supports_tracesysgood ());
6504}
6505
ae91f625
MK
/* Return the in-process agent's target description index, or 0 when
   the port does not provide one.  */

static int
linux_get_ipa_tdesc_idx (void)
{
  if (the_low_target.get_ipa_tdesc_idx == NULL)
    return 0;

  return (*the_low_target.get_ipa_tdesc_idx) ();
}
6514
219f2f23
PA
/* Return nonzero if the port supports tracepoints; 0 when it
   provides no hook.  */

static int
linux_supports_tracepoints (void)
{
  if (*the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}
6523
6524static CORE_ADDR
6525linux_read_pc (struct regcache *regcache)
6526{
6527 if (the_low_target.get_pc == NULL)
6528 return 0;
6529
6530 return (*the_low_target.get_pc) (regcache);
6531}
6532
/* Write PC into REGCACHE via the port's set_pc hook, which must
   exist for any port that reaches this path.  */

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}
6540
8336d594
PA
6541static int
6542linux_thread_stopped (struct thread_info *thread)
6543{
6544 return get_thread_lwp (thread)->stopped;
6545}
6546
6547/* This exposes stop-all-threads functionality to other modules. */
6548
6549static void
7984d532 6550linux_pause_all (int freeze)
8336d594 6551{
7984d532
PA
6552 stop_all_lwps (freeze, NULL);
6553}
6554
6555/* This exposes unstop-all-threads functionality to other gdbserver
6556 modules. */
6557
6558static void
6559linux_unpause_all (int unfreeze)
6560{
6561 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6562}
6563
79b44087
TBA
6564int
6565linux_process_target::prepare_to_access_memory ()
90d74c30
PA
6566{
6567 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6568 running LWP. */
6569 if (non_stop)
6570 linux_pause_all (1);
6571 return 0;
6572}
6573
79b44087
TBA
6574void
6575linux_process_target::done_accessing_memory ()
90d74c30
PA
6576{
6577 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6578 running LWP. */
6579 if (non_stop)
6580 linux_unpause_all (1);
6581}
6582
/* Install a fast tracepoint jump pad for tracepoint TPOINT located at
   TPADDR.  Pure forwarder to the architecture-specific low-target
   hook; callers must ensure the hook is implemented (no NULL check
   here).  The hook fills in all output parameters: *JUMP_ENTRY,
   *TRAMPOLINE, *TRAMPOLINE_SIZE, *JJUMP_PAD_INSN,
   *JJUMP_PAD_INSN_SIZE, *ADJUSTED_INSN_ADDR, *ADJUSTED_INSN_ADDR_END
   and, on failure, ERR.  Returns the hook's result.  */

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
6604
6a271cae
PA
6605static struct emit_ops *
6606linux_emit_ops (void)
6607{
6608 if (the_low_target.emit_ops != NULL)
6609 return (*the_low_target.emit_ops) ();
6610 else
6611 return NULL;
6612}
6613
405f8e94
SS
6614static int
6615linux_get_min_fast_tracepoint_insn_len (void)
6616{
6617 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6618}
6619
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   1 if /proc/PID/auxv cannot be opened, 2 if AT_PHDR/AT_PHNUM were
   not found there.  IS_ELF64 selects which auxv entry layout to
   decode.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  /* One auxv entry; its size depends on the inferior's word size.  */
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Read entry by entry; stop once both values are found or on a
     short read (end of the auxv).  Relies on read() returning full
     entries for this pseudo-file.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6685
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  /* Read the whole program header table from the inferior.  */
  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  PT_PHDR's p_vaddr is the link-time address
     of the headers; subtracting it from the runtime address obtained
     from the auxv yields the load bias.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: find PT_DYNAMIC and relocate its address.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6759
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the .dynamic section entry by entry until DT_NULL or a read
     failure.  The 64- and 32-bit arms below are structurally
     identical; only the Elf types differ.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Read the pointed-to value through a union to avoid
	     aliasing/alignment issues.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL stores an offset relative to the
	     dynamic entry's own address, not an absolute address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6861
/* Read one pointer from MEMADDR in the inferior.  PTR_SIZE is the
   inferior's pointer size (4 or 8); *PTR receives the value widened
   to CORE_ADDR.  Returns 0 on success, non-zero (errno) on failure.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6892
/* Field offsets, in bytes, of the SVR4 dynamic linker's `struct
   r_debug' and `struct link_map', for a given inferior word size.
   Filled in by linux_qxfer_libraries_svr4's 32-/64-bit tables.  */

struct link_map_offsets
  {
    /* Offset of r_debug.r_version.  */
    int r_version_offset;

    /* Offset of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6916
/* Construct qXfer:libraries-svr4:read reply.  Returns the number of
   bytes copied into READBUF, -1 on error, or -2 when WRITEBUF is
   non-NULL (writing is not supported).  The ANNEX may carry
   "start=ADDR;" and "prev=ADDR;" arguments to resume a partial list
   walk.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  /* Hard-coded glibc r_debug/link_map layouts for 32-bit inferiors.  */
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset. */
      4,     /* r_debug.r_map offset. */
      0,     /* l_addr offset in link_map. */
      4,     /* l_name offset in link_map. */
      8,     /* l_ld offset in link_map. */
      12,    /* l_next offset in link_map. */
      16     /* l_prev offset in link_map. */
    };

  /* Likewise for 64-bit inferiors.  */
  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset. */
      8,     /* r_debug.r_map offset. */
      0,     /* l_addr offset in link_map. */
      8,     /* l_name offset in link_map. */
      16,    /* l_ld offset in link_map. */
      24,    /* l_next offset in link_map. */
      32     /* l_prev offset in link_map. */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick offsets/pointer size from the inferior executable's ELF
     class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=value;" pairs from the annex; unknown names are
     skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link_map chain; stop on the first entry whose fields
     cannot all be read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Sanity check the chain's back-link.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Return the window [offset, offset+len) of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
7105
9accd112
MM
7106#ifdef HAVE_LINUX_BTRACE
7107
969c39fb 7108/* See to_disable_btrace target method. */
9accd112 7109
969c39fb
MM
7110static int
7111linux_low_disable_btrace (struct btrace_target_info *tinfo)
7112{
7113 enum btrace_error err;
7114
7115 err = linux_disable_btrace (tinfo);
7116 return (err == BTRACE_ERR_NONE ? 0 : -1);
7117}
7118
bc504a31 7119/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7120
7121static void
7122linux_low_encode_pt_config (struct buffer *buffer,
7123 const struct btrace_data_pt_config *config)
7124{
7125 buffer_grow_str (buffer, "<pt-config>\n");
7126
7127 switch (config->cpu.vendor)
7128 {
7129 case CV_INTEL:
7130 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7131 "model=\"%u\" stepping=\"%u\"/>\n",
7132 config->cpu.family, config->cpu.model,
7133 config->cpu.stepping);
7134 break;
7135
7136 default:
7137 break;
7138 }
7139
7140 buffer_grow_str (buffer, "</pt-config>\n");
7141}
7142
7143/* Encode a raw buffer. */
7144
7145static void
7146linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7147 unsigned int size)
7148{
7149 if (size == 0)
7150 return;
7151
268a13a5 7152 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
7153 buffer_grow_str (buffer, "<raw>\n");
7154
7155 while (size-- > 0)
7156 {
7157 char elem[2];
7158
7159 elem[0] = tohex ((*data >> 4) & 0xf);
7160 elem[1] = tohex (*data++ & 0xf);
7161
7162 buffer_grow (buffer, elem, 2);
7163 }
7164
7165 buffer_grow_str (buffer, "</raw>\n");
7166}
7167
/* See to_read_btrace target method.  Reads branch trace data for
   TINFO and renders it as XML into BUFFER.  Returns 0 on success;
   on failure returns -1 with an "E.<reason>" string in BUFFER.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Render according to the trace format the kernel gave us.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
/* See to_btrace_conf target method.  Renders TINFO's branch trace
   configuration as a <btrace-conf> XML document in BUFFER.  Always
   returns 0; an unavailable configuration yields an empty element.  */

static int
linux_low_btrace_conf (const struct btrace_target_info *tinfo,
		       struct buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
#endif /* HAVE_LINUX_BTRACE */
7264
7b669087
GB
7265/* See nat/linux-nat.h. */
7266
7267ptid_t
7268current_lwp_ptid (void)
7269{
7270 return ptid_of (current_thread);
7271}
7272
dd373349
AT
7273/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7274
7275static int
7276linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7277{
7278 if (the_low_target.breakpoint_kind_from_pc != NULL)
7279 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7280 else
1652a986 7281 return default_breakpoint_kind_from_pc (pcptr);
dd373349
AT
7282}
7283
7284/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7285
7286static const gdb_byte *
7287linux_sw_breakpoint_from_kind (int kind, int *size)
7288{
7289 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7290
7291 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7292}
7293
769ef81f
AT
7294/* Implementation of the target_ops method
7295 "breakpoint_kind_from_current_state". */
7296
7297static int
7298linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7299{
7300 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7301 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7302 else
7303 return linux_breakpoint_kind_from_pc (pcptr);
7304}
7305
276d4552
YQ
7306/* Default implementation of linux_target_ops method "set_pc" for
7307 32-bit pc register which is literally named "pc". */
7308
7309void
7310linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7311{
7312 uint32_t newpc = pc;
7313
7314 supply_register_by_name (regcache, "pc", &newpc);
7315}
7316
7317/* Default implementation of linux_target_ops method "get_pc" for
7318 32-bit pc register which is literally named "pc". */
7319
7320CORE_ADDR
7321linux_get_pc_32bit (struct regcache *regcache)
7322{
7323 uint32_t pc;
7324
7325 collect_register_by_name (regcache, "pc", &pc);
7326 if (debug_threads)
7327 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7328 return pc;
7329}
7330
6f69e520
YQ
7331/* Default implementation of linux_target_ops method "set_pc" for
7332 64-bit pc register which is literally named "pc". */
7333
7334void
7335linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7336{
7337 uint64_t newpc = pc;
7338
7339 supply_register_by_name (regcache, "pc", &newpc);
7340}
7341
7342/* Default implementation of linux_target_ops method "get_pc" for
7343 64-bit pc register which is literally named "pc". */
7344
7345CORE_ADDR
7346linux_get_pc_64bit (struct regcache *regcache)
7347{
7348 uint64_t pc;
7349
7350 collect_register_by_name (regcache, "pc", &pc);
7351 if (debug_threads)
7352 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7353 return pc;
7354}
7355
0570503d 7356/* See linux-low.h. */
974c89e0 7357
0570503d
PFC
7358int
7359linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7360{
7361 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7362 int offset = 0;
7363
7364 gdb_assert (wordsize == 4 || wordsize == 8);
7365
eac215cc 7366 while (the_target->pt->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7367 {
7368 if (wordsize == 4)
7369 {
0570503d 7370 uint32_t *data_p = (uint32_t *) data;
974c89e0 7371 if (data_p[0] == match)
0570503d
PFC
7372 {
7373 *valp = data_p[1];
7374 return 1;
7375 }
974c89e0
AH
7376 }
7377 else
7378 {
0570503d 7379 uint64_t *data_p = (uint64_t *) data;
974c89e0 7380 if (data_p[0] == match)
0570503d
PFC
7381 {
7382 *valp = data_p[1];
7383 return 1;
7384 }
974c89e0
AH
7385 }
7386
7387 offset += 2 * wordsize;
7388 }
7389
7390 return 0;
7391}
7392
7393/* See linux-low.h. */
7394
7395CORE_ADDR
7396linux_get_hwcap (int wordsize)
7397{
0570503d
PFC
7398 CORE_ADDR hwcap = 0;
7399 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7400 return hwcap;
974c89e0
AH
7401}
7402
7403/* See linux-low.h. */
7404
7405CORE_ADDR
7406linux_get_hwcap2 (int wordsize)
7407{
0570503d
PFC
7408 CORE_ADDR hwcap2 = 0;
7409 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7410 return hwcap2;
974c89e0 7411}
6f69e520 7412
/* The linux target ops object.  */

static linux_process_target the_linux_target;

/* The function-pointer target vector.  NOTE(review): this is a
   positional aggregate initializer — the order of entries below must
   match the field declaration order in process_stratum_target
   exactly; do not reorder.  Entries appear to be migrated one by one
   into linux_process_target methods (see &the_linux_target at the
   end) — confirm against target.h.  */

static process_stratum_target linux_target_ops = {
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,  /* Unimplemented slot in this configuration.  */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  /* No btrace support compiled in; keep the slots aligned.  */
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind,
  linux_proc_tid_get_name,
  linux_breakpoint_kind_from_current_state,
  linux_supports_software_single_step,
  linux_supports_catch_syscall,
  linux_get_ipa_tdesc_idx,
#if USE_THREAD_DB
  thread_db_thread_handle,
#else
  NULL,
#endif
  &the_linux_target,
};
7481
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets array (terminated by a
   negative size) and record the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7492
/* One-time initialization of the Linux low target: install the target
   vector, emit any ptrace/proc capability warnings, hook SIGCHLD, and
   initialize the architecture-specific layer.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so interrupted syscalls elsewhere are resumed rather
     than failing with EINTR on every child-status change.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  /* Probe which ptrace options this kernel supports.  */
  linux_check_ptrace_features ();
}
This page took 2.120771 seconds and 4 git commands to generate.