gdbserver/linux-low: turn 'fetch_register' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
b811d2c2 2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
69f4c9cc
AH
74#ifndef AT_HWCAP2
75#define AT_HWCAP2 26
76#endif
77
db0dfaa0
LM
78/* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81#if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84#if defined(__mcoldfire__)
85/* These are still undefined in 3.10 kernels. */
86#define PT_TEXT_ADDR 49*4
87#define PT_DATA_ADDR 50*4
88#define PT_TEXT_END_ADDR 51*4
89/* BFIN already defines these since at least 2.6.32 kernels. */
90#elif defined(BFIN)
91#define PT_TEXT_ADDR 220
92#define PT_TEXT_END_ADDR 224
93#define PT_DATA_ADDR 228
94/* These are still undefined in 3.10 kernels. */
95#elif defined(__TMS320C6X__)
96#define PT_TEXT_ADDR (0x10000*4)
97#define PT_DATA_ADDR (0x10004*4)
98#define PT_TEXT_END_ADDR (0x10008*4)
99#endif
100#endif
101
5203ae1e
TBA
102#if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107#define SUPPORTS_READ_OFFSETS
108#endif
109
9accd112 110#ifdef HAVE_LINUX_BTRACE
125f8a3d 111# include "nat/linux-btrace.h"
268a13a5 112# include "gdbsupport/btrace-common.h"
9accd112
MM
113#endif
114
8365dcf5
TJB
115#ifndef HAVE_ELF32_AUXV_T
116/* Copied from glibc's elf.h. */
117typedef struct
118{
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We use to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127} Elf32_auxv_t;
128#endif
129
130#ifndef HAVE_ELF64_AUXV_T
131/* Copied from glibc's elf.h. */
132typedef struct
133{
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We use to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142} Elf64_auxv_t;
143#endif
144
ded48a5e
YQ
145/* Does the current host support PTRACE_GETREGSET? */
146int have_ptrace_getregset = -1;
147
cff068da
GB
148/* LWP accessors. */
149
150/* See nat/linux-nat.h. */
151
152ptid_t
153ptid_of_lwp (struct lwp_info *lwp)
154{
155 return ptid_of (get_lwp_thread (lwp));
156}
157
158/* See nat/linux-nat.h. */
159
4b134ca1
GB
160void
161lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163{
164 lwp->arch_private = info;
165}
166
167/* See nat/linux-nat.h. */
168
169struct arch_lwp_info *
170lwp_arch_private_info (struct lwp_info *lwp)
171{
172 return lwp->arch_private;
173}
174
175/* See nat/linux-nat.h. */
176
cff068da
GB
177int
178lwp_is_stopped (struct lwp_info *lwp)
179{
180 return lwp->stopped;
181}
182
183/* See nat/linux-nat.h. */
184
185enum target_stop_reason
186lwp_stop_reason (struct lwp_info *lwp)
187{
188 return lwp->stop_reason;
189}
190
0e00e962
AA
191/* See nat/linux-nat.h. */
192
193int
194lwp_is_stepping (struct lwp_info *lwp)
195{
196 return lwp->stepping;
197}
198
05044653
PA
199/* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
24a09b5f 202
05044653
PA
203struct simple_pid_list
204{
205 /* The process ID. */
206 int pid;
207
208 /* The status as reported by waitpid. */
209 int status;
210
211 /* Next in chain. */
212 struct simple_pid_list *next;
213};
214struct simple_pid_list *stopped_pids;
215
216/* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219static void
220add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221{
8d749320 222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228}
229
230static int
231pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232{
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246}
24a09b5f 247
bde24c0a
PA
248enum stopping_threads_kind
249 {
250 /* Not stopping threads presently. */
251 NOT_STOPPING_THREADS,
252
253 /* Stopping threads. */
254 STOPPING_THREADS,
255
256 /* Stopping and suspending threads. */
257 STOPPING_AND_SUSPENDING_THREADS
258 };
259
260/* This is set while stop_all_lwps is in effect. */
261enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
262
263/* FIXME make into a target method? */
24a09b5f 264int using_threads = 1;
24a09b5f 265
fa593d66
PA
266/* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268static int stabilizing_threads;
269
2acc282a 270static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 271 int step, int signal, siginfo_t *info);
f50bf8e5 272static void unsuspend_all_lwps (struct lwp_info *except);
b3312d80 273static struct lwp_info *add_lwp (ptid_t ptid);
95954743 274static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 275static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 276static int finish_step_over (struct lwp_info *lwp);
d50171e4 277static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 278static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 279static int linux_low_ptrace_options (int attached);
ced2dffb 280static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
e2b44075 281static void proceed_one_lwp (thread_info *thread, lwp_info *except);
d50171e4 282
582511be
PA
283/* When the event-loop is doing a step-over, this points at the thread
284 being stepped. */
285ptid_t step_over_bkpt;
286
7d00775e 287/* True if the low target can hardware single-step. */
d50171e4
PA
288
289static int
290can_hardware_single_step (void)
291{
7d00775e
AT
292 if (the_low_target.supports_hardware_single_step != NULL)
293 return the_low_target.supports_hardware_single_step ();
294 else
295 return 0;
296}
297
298/* True if the low target can software single-step. Such targets
fa5308bd 299 implement the GET_NEXT_PCS callback. */
7d00775e
AT
300
301static int
302can_software_single_step (void)
303{
fa5308bd 304 return (the_low_target.get_next_pcs != NULL);
d50171e4
PA
305}
306
307/* True if the low target supports memory breakpoints. If so, we'll
308 have a GET_PC implementation. */
309
310static int
311supports_breakpoints (void)
312{
313 return (the_low_target.get_pc != NULL);
314}
0d62e5e8 315
fa593d66
PA
316/* Returns true if this target can support fast tracepoints. This
317 does not mean that the in-process agent has been loaded in the
318 inferior. */
319
320static int
321supports_fast_tracepoints (void)
322{
323 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
324}
325
c2d6af84
PA
326/* True if LWP is stopped in its stepping range. */
327
328static int
329lwp_in_step_range (struct lwp_info *lwp)
330{
331 CORE_ADDR pc = lwp->stop_pc;
332
333 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
334}
335
0d62e5e8
DJ
336struct pending_signals
337{
338 int signal;
32ca6d61 339 siginfo_t info;
0d62e5e8
DJ
340 struct pending_signals *prev;
341};
611cb4a5 342
bd99dc85
PA
343/* The read/write ends of the pipe registered as waitable file in the
344 event loop. */
345static int linux_event_pipe[2] = { -1, -1 };
346
347/* True if we're currently in async mode. */
348#define target_is_async_p() (linux_event_pipe[0] != -1)
349
02fc4de7 350static void send_sigstop (struct lwp_info *lwp);
bd99dc85 351
d0722149
DE
352/* Return non-zero if HEADER is a 64-bit ELF file. */
353
354static int
214d508e 355elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
d0722149 356{
214d508e
L
357 if (header->e_ident[EI_MAG0] == ELFMAG0
358 && header->e_ident[EI_MAG1] == ELFMAG1
359 && header->e_ident[EI_MAG2] == ELFMAG2
360 && header->e_ident[EI_MAG3] == ELFMAG3)
361 {
362 *machine = header->e_machine;
363 return header->e_ident[EI_CLASS] == ELFCLASS64;
364
365 }
366 *machine = EM_NONE;
367 return -1;
d0722149
DE
368}
369
370/* Return non-zero if FILE is a 64-bit ELF file,
371 zero if the file is not a 64-bit ELF file,
372 and -1 if the file is not accessible or doesn't exist. */
373
be07f1a2 374static int
214d508e 375elf_64_file_p (const char *file, unsigned int *machine)
d0722149 376{
957f3f49 377 Elf64_Ehdr header;
d0722149
DE
378 int fd;
379
380 fd = open (file, O_RDONLY);
381 if (fd < 0)
382 return -1;
383
384 if (read (fd, &header, sizeof (header)) != sizeof (header))
385 {
386 close (fd);
387 return 0;
388 }
389 close (fd);
390
214d508e 391 return elf_64_header_p (&header, machine);
d0722149
DE
392}
393
be07f1a2
PA
394/* Accepts an integer PID; Returns true if the executable PID is
395 running is a 64-bit ELF file.. */
396
397int
214d508e 398linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 399{
d8d2a3ee 400 char file[PATH_MAX];
be07f1a2
PA
401
402 sprintf (file, "/proc/%d/exe", pid);
214d508e 403 return elf_64_file_p (file, machine);
be07f1a2
PA
404}
405
bd99dc85
PA
406static void
407delete_lwp (struct lwp_info *lwp)
408{
fa96cb38
PA
409 struct thread_info *thr = get_lwp_thread (lwp);
410
411 if (debug_threads)
412 debug_printf ("deleting %ld\n", lwpid_of (thr));
413
414 remove_thread (thr);
466eecee
SM
415
416 if (the_low_target.delete_thread != NULL)
417 the_low_target.delete_thread (lwp->arch_private);
418 else
419 gdb_assert (lwp->arch_private == NULL);
420
bd99dc85
PA
421 free (lwp);
422}
423
95954743
PA
424/* Add a process to the common process list, and set its private
425 data. */
426
427static struct process_info *
428linux_add_process (int pid, int attached)
429{
430 struct process_info *proc;
431
95954743 432 proc = add_process (pid, attached);
8d749320 433 proc->priv = XCNEW (struct process_info_private);
95954743 434
aa5ca48f 435 if (the_low_target.new_process != NULL)
fe978cb0 436 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 437
95954743
PA
438 return proc;
439}
440
582511be
PA
441static CORE_ADDR get_pc (struct lwp_info *lwp);
442
797bcff5
TBA
443void
444linux_process_target::arch_setup_thread (thread_info *thread)
94585166
DB
445{
446 struct thread_info *saved_thread;
447
448 saved_thread = current_thread;
449 current_thread = thread;
450
797bcff5 451 low_arch_setup ();
94585166
DB
452
453 current_thread = saved_thread;
454}
455
d16f3f6c
TBA
456int
457linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
458 int wstat)
24a09b5f 459{
c12a5089 460 client_state &cs = get_client_state ();
94585166 461 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 462 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 463 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 464 struct lwp_info *new_lwp;
24a09b5f 465
65706a29
PA
466 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
467
82075af2
JS
468 /* All extended events we currently use are mid-syscall. Only
469 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
470 you have to be using PTRACE_SEIZE to get that. */
471 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
472
c269dbdb
DB
473 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
474 || (event == PTRACE_EVENT_CLONE))
24a09b5f 475 {
95954743 476 ptid_t ptid;
24a09b5f 477 unsigned long new_pid;
05044653 478 int ret, status;
24a09b5f 479
de0d863e 480 /* Get the pid of the new lwp. */
d86d4aaf 481 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 482 &new_pid);
24a09b5f
DJ
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 485 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489
97438e3f 490 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
491
492 if (ret == -1)
493 perror_with_name ("waiting for new child");
494 else if (ret != new_pid)
495 warning ("wait returned unexpected PID %d", ret);
da5898ce 496 else if (!WIFSTOPPED (status))
24a09b5f
DJ
497 warning ("wait returned unexpected status 0x%x", status);
498 }
499
c269dbdb 500 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
501 {
502 struct process_info *parent_proc;
503 struct process_info *child_proc;
504 struct lwp_info *child_lwp;
bfacd19d 505 struct thread_info *child_thr;
de0d863e
DB
506 struct target_desc *tdesc;
507
fd79271b 508 ptid = ptid_t (new_pid, new_pid, 0);
de0d863e
DB
509
510 if (debug_threads)
511 {
512 debug_printf ("HEW: Got fork event from LWP %ld, "
513 "new child is %d\n",
e38504b3 514 ptid_of (event_thr).lwp (),
e99b03dc 515 ptid.pid ());
de0d863e
DB
516 }
517
518 /* Add the new process to the tables and clone the breakpoint
519 lists of the parent. We need to do this even if the new process
520 will be detached, since we will need the process object and the
521 breakpoints to remove any breakpoints from memory when we
522 detach, and the client side will access registers. */
523 child_proc = linux_add_process (new_pid, 0);
524 gdb_assert (child_proc != NULL);
525 child_lwp = add_lwp (ptid);
526 gdb_assert (child_lwp != NULL);
527 child_lwp->stopped = 1;
bfacd19d
DB
528 child_lwp->must_set_ptrace_flags = 1;
529 child_lwp->status_pending_p = 0;
530 child_thr = get_lwp_thread (child_lwp);
531 child_thr->last_resume_kind = resume_stop;
998d452a
PA
532 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
533
863d01bd 534 /* If we're suspending all threads, leave this one suspended
0f8288ae
YQ
535 too. If the fork/clone parent is stepping over a breakpoint,
536 all other threads have been suspended already. Leave the
537 child suspended too. */
538 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
539 || event_lwp->bp_reinsert != 0)
863d01bd
PA
540 {
541 if (debug_threads)
542 debug_printf ("HEW: leaving child suspended\n");
543 child_lwp->suspended = 1;
544 }
545
de0d863e
DB
546 parent_proc = get_thread_process (event_thr);
547 child_proc->attached = parent_proc->attached;
2e7b624b
YQ
548
549 if (event_lwp->bp_reinsert != 0
550 && can_software_single_step ()
551 && event == PTRACE_EVENT_VFORK)
552 {
3b9a79ef
YQ
553 /* If we leave single-step breakpoints there, child will
554 hit it, so uninsert single-step breakpoints from parent
2e7b624b
YQ
555 (and child). Once vfork child is done, reinsert
556 them back to parent. */
3b9a79ef 557 uninsert_single_step_breakpoints (event_thr);
2e7b624b
YQ
558 }
559
63c40ec7 560 clone_all_breakpoints (child_thr, event_thr);
de0d863e 561
cc397f3a 562 tdesc = allocate_target_description ();
de0d863e
DB
563 copy_target_description (tdesc, parent_proc->tdesc);
564 child_proc->tdesc = tdesc;
de0d863e 565
3a8a0396
DB
566 /* Clone arch-specific process data. */
567 if (the_low_target.new_fork != NULL)
568 the_low_target.new_fork (parent_proc, child_proc);
569
de0d863e 570 /* Save fork info in the parent thread. */
c269dbdb
DB
571 if (event == PTRACE_EVENT_FORK)
572 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
573 else if (event == PTRACE_EVENT_VFORK)
574 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
575
de0d863e 576 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 577
de0d863e
DB
578 /* The status_pending field contains bits denoting the
579 extended event, so when the pending event is handled,
580 the handler will look at lwp->waitstatus. */
581 event_lwp->status_pending_p = 1;
582 event_lwp->status_pending = wstat;
583
5a04c4cf
PA
584 /* Link the threads until the parent event is passed on to
585 higher layers. */
586 event_lwp->fork_relative = child_lwp;
587 child_lwp->fork_relative = event_lwp;
588
3b9a79ef
YQ
589 /* If the parent thread is doing step-over with single-step
590 breakpoints, the list of single-step breakpoints are cloned
2e7b624b
YQ
591 from the parent's. Remove them from the child process.
592 In case of vfork, we'll reinsert them back once vforked
593 child is done. */
8a81c5d7 594 if (event_lwp->bp_reinsert != 0
2e7b624b 595 && can_software_single_step ())
8a81c5d7 596 {
8a81c5d7
YQ
597 /* The child process is forked and stopped, so it is safe
598 to access its memory without stopping all other threads
599 from other processes. */
3b9a79ef 600 delete_single_step_breakpoints (child_thr);
8a81c5d7 601
3b9a79ef
YQ
602 gdb_assert (has_single_step_breakpoints (event_thr));
603 gdb_assert (!has_single_step_breakpoints (child_thr));
8a81c5d7
YQ
604 }
605
de0d863e
DB
606 /* Report the event. */
607 return 0;
608 }
609
fa96cb38
PA
610 if (debug_threads)
611 debug_printf ("HEW: Got clone event "
612 "from LWP %ld, new child is LWP %ld\n",
613 lwpid_of (event_thr), new_pid);
614
fd79271b 615 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
b3312d80 616 new_lwp = add_lwp (ptid);
24a09b5f 617
e27d73f6
DE
618 /* Either we're going to immediately resume the new thread
619 or leave it stopped. linux_resume_one_lwp is a nop if it
620 thinks the thread is currently running, so set this first
621 before calling linux_resume_one_lwp. */
622 new_lwp->stopped = 1;
623
0f8288ae
YQ
624 /* If we're suspending all threads, leave this one suspended
625 too. If the fork/clone parent is stepping over a breakpoint,
626 all other threads have been suspended already. Leave the
627 child suspended too. */
628 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
629 || event_lwp->bp_reinsert != 0)
bde24c0a
PA
630 new_lwp->suspended = 1;
631
da5898ce
DJ
632 /* Normally we will get the pending SIGSTOP. But in some cases
633 we might get another signal delivered to the group first.
f21cc1a2 634 If we do get another signal, be sure not to lose it. */
20ba1ce6 635 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 636 {
54a0b537 637 new_lwp->stop_expected = 1;
20ba1ce6
PA
638 new_lwp->status_pending_p = 1;
639 new_lwp->status_pending = status;
da5898ce 640 }
c12a5089 641 else if (cs.report_thread_events)
65706a29
PA
642 {
643 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
644 new_lwp->status_pending_p = 1;
645 new_lwp->status_pending = status;
646 }
de0d863e 647
a0aad537 648#ifdef USE_THREAD_DB
94c207e0 649 thread_db_notice_clone (event_thr, ptid);
a0aad537 650#endif
86299109 651
de0d863e
DB
652 /* Don't report the event. */
653 return 1;
24a09b5f 654 }
c269dbdb
DB
655 else if (event == PTRACE_EVENT_VFORK_DONE)
656 {
657 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
658
2e7b624b
YQ
659 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
660 {
3b9a79ef 661 reinsert_single_step_breakpoints (event_thr);
2e7b624b 662
3b9a79ef 663 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
664 }
665
c269dbdb
DB
666 /* Report the event. */
667 return 0;
668 }
c12a5089 669 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
670 {
671 struct process_info *proc;
f27866ba 672 std::vector<int> syscalls_to_catch;
94585166
DB
673 ptid_t event_ptid;
674 pid_t event_pid;
675
676 if (debug_threads)
677 {
678 debug_printf ("HEW: Got exec event from LWP %ld\n",
679 lwpid_of (event_thr));
680 }
681
682 /* Get the event ptid. */
683 event_ptid = ptid_of (event_thr);
e99b03dc 684 event_pid = event_ptid.pid ();
94585166 685
82075af2 686 /* Save the syscall list from the execing process. */
94585166 687 proc = get_thread_process (event_thr);
f27866ba 688 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
689
690 /* Delete the execing process and all its threads. */
d16f3f6c 691 mourn (proc);
94585166
DB
692 current_thread = NULL;
693
694 /* Create a new process/lwp/thread. */
695 proc = linux_add_process (event_pid, 0);
696 event_lwp = add_lwp (event_ptid);
697 event_thr = get_lwp_thread (event_lwp);
698 gdb_assert (current_thread == event_thr);
797bcff5 699 arch_setup_thread (event_thr);
94585166
DB
700
701 /* Set the event status. */
702 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
703 event_lwp->waitstatus.value.execd_pathname
704 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
705
706 /* Mark the exec status as pending. */
707 event_lwp->stopped = 1;
708 event_lwp->status_pending_p = 1;
709 event_lwp->status_pending = wstat;
710 event_thr->last_resume_kind = resume_continue;
711 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
712
82075af2
JS
713 /* Update syscall state in the new lwp, effectively mid-syscall too. */
714 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
715
716 /* Restore the list to catch. Don't rely on the client, which is free
717 to avoid sending a new list when the architecture doesn't change.
718 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 719 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 720
94585166
DB
721 /* Report the event. */
722 *orig_event_lwp = event_lwp;
723 return 0;
724 }
de0d863e
DB
725
726 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
727}
728
d50171e4
PA
729/* Return the PC as read from the regcache of LWP, without any
730 adjustment. */
731
732static CORE_ADDR
733get_pc (struct lwp_info *lwp)
734{
0bfdf32f 735 struct thread_info *saved_thread;
d50171e4
PA
736 struct regcache *regcache;
737 CORE_ADDR pc;
738
739 if (the_low_target.get_pc == NULL)
740 return 0;
741
0bfdf32f
GB
742 saved_thread = current_thread;
743 current_thread = get_lwp_thread (lwp);
d50171e4 744
0bfdf32f 745 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
746 pc = (*the_low_target.get_pc) (regcache);
747
748 if (debug_threads)
87ce2a04 749 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 750
0bfdf32f 751 current_thread = saved_thread;
d50171e4
PA
752 return pc;
753}
754
82075af2 755/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
4cc32bec 756 Fill *SYSNO with the syscall nr trapped. */
82075af2
JS
757
758static void
4cc32bec 759get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
82075af2
JS
760{
761 struct thread_info *saved_thread;
762 struct regcache *regcache;
763
764 if (the_low_target.get_syscall_trapinfo == NULL)
765 {
766 /* If we cannot get the syscall trapinfo, report an unknown
4cc32bec 767 system call number. */
82075af2 768 *sysno = UNKNOWN_SYSCALL;
82075af2
JS
769 return;
770 }
771
772 saved_thread = current_thread;
773 current_thread = get_lwp_thread (lwp);
774
775 regcache = get_thread_regcache (current_thread, 1);
4cc32bec 776 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
82075af2
JS
777
778 if (debug_threads)
4cc32bec 779 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
780
781 current_thread = saved_thread;
782}
783
e7ad2f14 784static int check_stopped_by_watchpoint (struct lwp_info *child);
0d62e5e8 785
e7ad2f14
PA
786/* Called when the LWP stopped for a signal/trap. If it stopped for a
787 trap check what caused it (breakpoint, watchpoint, trace, etc.),
788 and save the result in the LWP's stop_reason field. If it stopped
789 for a breakpoint, decrement the PC if necessary on the lwp's
790 architecture. Returns true if we now have the LWP's stop PC. */
0d62e5e8 791
582511be 792static int
e7ad2f14 793save_stop_reason (struct lwp_info *lwp)
0d62e5e8 794{
582511be
PA
795 CORE_ADDR pc;
796 CORE_ADDR sw_breakpoint_pc;
797 struct thread_info *saved_thread;
3e572f71
PA
798#if USE_SIGTRAP_SIGINFO
799 siginfo_t siginfo;
800#endif
d50171e4
PA
801
802 if (the_low_target.get_pc == NULL)
803 return 0;
0d62e5e8 804
582511be
PA
805 pc = get_pc (lwp);
806 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
d50171e4 807
582511be
PA
808 /* breakpoint_at reads from the current thread. */
809 saved_thread = current_thread;
810 current_thread = get_lwp_thread (lwp);
47c0c975 811
3e572f71
PA
812#if USE_SIGTRAP_SIGINFO
813 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
814 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
815 {
816 if (siginfo.si_signo == SIGTRAP)
817 {
e7ad2f14
PA
818 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
819 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 820 {
e7ad2f14
PA
821 /* The si_code is ambiguous on this arch -- check debug
822 registers. */
823 if (!check_stopped_by_watchpoint (lwp))
824 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
825 }
826 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
827 {
828 /* If we determine the LWP stopped for a SW breakpoint,
829 trust it. Particularly don't check watchpoint
830 registers, because at least on s390, we'd find
831 stopped-by-watchpoint as long as there's a watchpoint
832 set. */
3e572f71 833 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 834 }
e7ad2f14 835 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 836 {
e7ad2f14
PA
837 /* This can indicate either a hardware breakpoint or
838 hardware watchpoint. Check debug registers. */
839 if (!check_stopped_by_watchpoint (lwp))
840 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 841 }
2bf6fb9d
PA
842 else if (siginfo.si_code == TRAP_TRACE)
843 {
e7ad2f14
PA
844 /* We may have single stepped an instruction that
845 triggered a watchpoint. In that case, on some
846 architectures (such as x86), instead of TRAP_HWBKPT,
847 si_code indicates TRAP_TRACE, and we need to check
848 the debug registers separately. */
849 if (!check_stopped_by_watchpoint (lwp))
850 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 851 }
3e572f71
PA
852 }
853 }
854#else
582511be
PA
855 /* We may have just stepped a breakpoint instruction. E.g., in
856 non-stop mode, GDB first tells the thread A to step a range, and
857 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
858 case we need to report the breakpoint PC. */
859 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
582511be 860 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
e7ad2f14
PA
861 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
862
863 if (hardware_breakpoint_inserted_here (pc))
864 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
865
866 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
867 check_stopped_by_watchpoint (lwp);
868#endif
869
870 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be
PA
871 {
872 if (debug_threads)
873 {
874 struct thread_info *thr = get_lwp_thread (lwp);
875
876 debug_printf ("CSBB: %s stopped by software breakpoint\n",
877 target_pid_to_str (ptid_of (thr)));
878 }
879
880 /* Back up the PC if necessary. */
881 if (pc != sw_breakpoint_pc)
e7ad2f14 882 {
582511be
PA
883 struct regcache *regcache
884 = get_thread_regcache (current_thread, 1);
885 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
886 }
887
e7ad2f14
PA
888 /* Update this so we record the correct stop PC below. */
889 pc = sw_breakpoint_pc;
582511be 890 }
e7ad2f14 891 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
582511be
PA
892 {
893 if (debug_threads)
894 {
895 struct thread_info *thr = get_lwp_thread (lwp);
896
897 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
898 target_pid_to_str (ptid_of (thr)));
899 }
e7ad2f14
PA
900 }
901 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
902 {
903 if (debug_threads)
904 {
905 struct thread_info *thr = get_lwp_thread (lwp);
47c0c975 906
e7ad2f14
PA
907 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
908 target_pid_to_str (ptid_of (thr)));
909 }
582511be 910 }
e7ad2f14
PA
911 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
912 {
913 if (debug_threads)
914 {
915 struct thread_info *thr = get_lwp_thread (lwp);
582511be 916
e7ad2f14
PA
917 debug_printf ("CSBB: %s stopped by trace\n",
918 target_pid_to_str (ptid_of (thr)));
919 }
920 }
921
922 lwp->stop_pc = pc;
582511be 923 current_thread = saved_thread;
e7ad2f14 924 return 1;
0d62e5e8 925}
ce3a066d 926
b3312d80 927static struct lwp_info *
95954743 928add_lwp (ptid_t ptid)
611cb4a5 929{
54a0b537 930 struct lwp_info *lwp;
0d62e5e8 931
8d749320 932 lwp = XCNEW (struct lwp_info);
00db26fa
PA
933
934 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 935
754e3168
AH
936 lwp->thread = add_thread (ptid, lwp);
937
aa5ca48f 938 if (the_low_target.new_thread != NULL)
34c703da 939 the_low_target.new_thread (lwp);
aa5ca48f 940
54a0b537 941 return lwp;
0d62e5e8 942}
611cb4a5 943
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, after fork and before exec.  */

static void
linux_ptrace_fun ()
{
  /* Make this process traceable by its parent (GDBserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group, so terminal signals
     aimed at it don't hit GDBserver as well.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* fd 0 was just closed, so this open yields fd 0 (stdin).  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
975
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Optionally disable address space randomization for the child;
       the RAII object restores the original personality when this
       scope exits, i.e. right after the fork.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  /* The initial thread's lwpid is the same as the process id.  */
  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* PTRACE_SETOPTIONS can only be applied once the child has stopped;
     record that it's still needed (see post_create_inferior).  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
1010
ece66d65
JS
1011/* Implement the post_create_inferior target_ops method. */
1012
6dee9afb
TBA
1013void
1014linux_process_target::post_create_inferior ()
ece66d65
JS
1015{
1016 struct lwp_info *lwp = get_thread_lwp (current_thread);
1017
797bcff5 1018 low_arch_setup ();
ece66d65
JS
1019
1020 if (lwp->must_set_ptrace_flags)
1021 {
1022 struct process_info *proc = current_process ();
1023 int options = linux_low_ptrace_options (proc->attached);
1024
1025 linux_enable_event_reporting (lwpid_of (current_thread), options);
1026 lwp->must_set_ptrace_flags = 0;
1027 }
1028}
1029
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  PTID's lwp component names the LWP to PTRACE_ATTACH to; a
   new lwp_info is registered for it.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* Was the LWP already in a (job-control) stop before we attached?  */
  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1114
8784d563
PA
1115/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1116 already attached. Returns true if a new LWP is found, false
1117 otherwise. */
1118
1119static int
1120attach_proc_task_lwp_callback (ptid_t ptid)
1121{
1122 /* Is this a new thread? */
1123 if (find_thread_ptid (ptid) == NULL)
1124 {
e38504b3 1125 int lwpid = ptid.lwp ();
8784d563
PA
1126 int err;
1127
1128 if (debug_threads)
1129 debug_printf ("Found new lwp %d\n", lwpid);
1130
1131 err = linux_attach_lwp (ptid);
1132
1133 /* Be quiet if we simply raced with the thread exiting. EPERM
1134 is returned if the thread's task still exists, and is marked
1135 as exited or zombie, as well as other conditions, so in that
1136 case, confirm the status in /proc/PID/status. */
1137 if (err == ESRCH
1138 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1139 {
1140 if (debug_threads)
1141 {
1142 debug_printf ("Cannot attach to lwp %d: "
1143 "thread is gone (%d: %s)\n",
6d91ce9a 1144 lwpid, err, safe_strerror (err));
8784d563
PA
1145 }
1146 }
1147 else if (err != 0)
1148 {
4d9b86e1 1149 std::string reason
50fa3001 1150 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1
SM
1151
1152 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1153 }
1154
1155 return 1;
1156 }
1157 return 0;
1158}
1159
500c1d85
PA
1160static void async_file_mark (void);
1161
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration done above, then error out.  */
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      /* Collect the initial stop of some LWP of this process.  */
      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* Anything other than the expected SIGSTOP must be kept pending
	 so it can be reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      /* Kick the event loop, so the pending stop is noticed.  */
      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1234
95954743 1235static int
e4eb0dec 1236last_thread_of_process_p (int pid)
95954743 1237{
e4eb0dec 1238 bool seen_one = false;
95954743 1239
da4ae14a 1240 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1241 {
e4eb0dec
SM
1242 if (!seen_one)
1243 {
1244 /* This is the first thread of this process we see. */
1245 seen_one = true;
1246 return false;
1247 }
1248 else
1249 {
1250 /* This is the second thread of this process we see. */
1251 return true;
1252 }
1253 });
da6d8c04 1254
e4eb0dec 1255 return thread == NULL;
95954743
PA
1256}
1257
/* Kill LWP.  Sends SIGKILL first, then PTRACE_KILL (see the comment
   below for why both).  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Capture errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1301
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD here means LWPID is a clone child; retry with
	 __WCLONE, which selects such children.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1344
578290ec 1345/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1346 except the leader. */
95954743 1347
578290ec
SM
1348static void
1349kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1350{
54a0b537 1351 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1352
fd500816
DJ
1353 /* We avoid killing the first thread here, because of a Linux kernel (at
1354 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1355 the children get a chance to be reaped, it will remain a zombie
1356 forever. */
95954743 1357
d86d4aaf 1358 if (lwpid_of (thread) == pid)
95954743
PA
1359 {
1360 if (debug_threads)
87ce2a04 1361 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1362 target_pid_to_str (thread->id));
578290ec 1363 return;
95954743 1364 }
fd500816 1365
e76126e8 1366 kill_wait_lwp (lwp);
da6d8c04
DJ
1367}
1368
c6885a57
TBA
1369int
1370linux_process_target::kill (process_info *process)
0d62e5e8 1371{
a780ef4f 1372 int pid = process->pid;
9d606399 1373
f9e39928
PA
1374 /* If we're killing a running inferior, make sure it is stopped
1375 first, as PTRACE_KILL will not work otherwise. */
7984d532 1376 stop_all_lwps (0, NULL);
f9e39928 1377
578290ec
SM
1378 for_each_thread (pid, [&] (thread_info *thread)
1379 {
1380 kill_one_lwp_callback (thread, pid);
1381 });
fd500816 1382
54a0b537 1383 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1384 thread in the list, so do so now. */
a780ef4f 1385 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
bd99dc85 1386
784867a5 1387 if (lwp == NULL)
fd500816 1388 {
784867a5 1389 if (debug_threads)
d86d4aaf
DE
1390 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1391 pid);
784867a5
JK
1392 }
1393 else
e76126e8 1394 kill_wait_lwp (lwp);
2d717e4f 1395
8adb37b9 1396 mourn (process);
f9e39928
PA
1397
1398 /* Since we presently can only stop all lwps of all processes, we
1399 need to unstop lwps of other processes. */
7984d532 1400 unstop_all_lwps (0, NULL);
95954743 1401 return 0;
0d62e5e8
DJ
1402}
1403
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   the host signal number to deliver, or 0 for none.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  /* A non-stop status means the lwp has no stop signal pending.  */
  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal table, if we have one.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1484
/* Detach from LWP.  Delivers any pending signal, detaches with
   PTRACE_DETACH, and deletes the lwp from our list.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1569
798a38e8 1570/* Callback for for_each_thread. Detaches from non-leader threads of a
ced2dffb
PA
1571 given process. */
1572
798a38e8
SM
1573static void
1574linux_detach_lwp_callback (thread_info *thread)
ced2dffb 1575{
ced2dffb
PA
1576 /* We don't actually detach from the thread group leader just yet.
1577 If the thread group exits, we must reap the zombie clone lwps
1578 before we're able to reap the leader. */
798a38e8
SM
1579 if (thread->id.pid () == thread->id.lwp ())
1580 return;
ced2dffb 1581
798a38e8 1582 lwp_info *lwp = get_thread_lwp (thread);
ced2dffb 1583 linux_detach_one_lwp (lwp);
6ad8ae5c
DJ
1584}
1585
/* Detach from PROCESS and all of its lwps, leader last.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1624
1625/* Remove all LWPs that belong to process PROC from the lwp list. */
1626
8adb37b9
TBA
1627void
1628linux_process_target::mourn (process_info *process)
8336d594
PA
1629{
1630 struct process_info_private *priv;
1631
1632#ifdef USE_THREAD_DB
1633 thread_db_mourn (process);
1634#endif
1635
6b2a85da
SM
1636 for_each_thread (process->pid, [] (thread_info *thread)
1637 {
1638 delete_lwp (get_thread_lwp (thread));
1639 });
f9e39928 1640
8336d594 1641 /* Freeing all private data. */
fe978cb0 1642 priv = process->priv;
04ec7890
SM
1643 if (the_low_target.delete_process != NULL)
1644 the_low_target.delete_process (priv->arch_private);
1645 else
1646 gdb_assert (priv->arch_private == NULL);
8336d594 1647 free (priv);
fe978cb0 1648 process->priv = NULL;
505106cd
PA
1649
1650 remove_process (process);
8336d594
PA
1651}
1652
95a49a39
TBA
1653void
1654linux_process_target::join (int pid)
444d6139 1655{
444d6139
PA
1656 int status, ret;
1657
1658 do {
d105de22 1659 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1660 if (WIFEXITED (status) || WIFSIGNALED (status))
1661 break;
1662 } while (ret != -1 || errno != ECHILD);
1663}
1664
13d3d99b
TBA
1665/* Return true if the given thread is still alive. */
1666
1667bool
1668linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1669{
95954743
PA
1670 struct lwp_info *lwp = find_lwp_pid (ptid);
1671
1672 /* We assume we always know if a thread exits. If a whole process
1673 exited but we still haven't been able to report it to GDB, we'll
1674 hold on to the last lwp of the dead process. */
1675 if (lwp != NULL)
00db26fa 1676 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1677 else
1678 return 0;
1679}
1680
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* A breakpoint stop is only still interesting if the breakpoint
     situation hasn't changed underneath us.  Re-check it, unless the
     thread was explicitly asked to stop (resume_stop).  */
  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily make THREAD current; restored before returning.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons, also verify the
	 breakpoint that caused the stop still exists at PC.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1748
a681f9c9
PA
1749/* Returns true if LWP is resumed from the client's perspective. */
1750
1751static int
1752lwp_resumed (struct lwp_info *lwp)
1753{
1754 struct thread_info *thread = get_lwp_thread (lwp);
1755
1756 if (thread->last_resume_kind != resume_stop)
1757 return 1;
1758
1759 /* Did gdb send us a `vCont;t', but we haven't reported the
1760 corresponding stop to gdb yet? If so, the thread is still
1761 resumed/running from gdb's perspective. */
1762 if (thread->last_resume_kind == resume_stop
1763 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1764 return 1;
1765
1766 return 0;
1767}
1768
83e1b6c1
SM
1769/* Return true if this lwp has an interesting status pending. */
1770static bool
1771status_pending_p_callback (thread_info *thread, ptid_t ptid)
0d62e5e8 1772{
582511be 1773 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1774
1775 /* Check if we're only interested in events from a specific process
afa8d396 1776 or a specific LWP. */
83e1b6c1 1777 if (!thread->id.matches (ptid))
95954743 1778 return 0;
0d62e5e8 1779
a681f9c9
PA
1780 if (!lwp_resumed (lp))
1781 return 0;
1782
582511be
PA
1783 if (lp->status_pending_p
1784 && !thread_still_has_status_pending_p (thread))
1785 {
1786 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1787 return 0;
1788 }
0d62e5e8 1789
582511be 1790 return lp->status_pending_p;
0d62e5e8
DJ
1791}
1792
95954743
PA
1793struct lwp_info *
1794find_lwp_pid (ptid_t ptid)
1795{
da4ae14a 1796 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1797 {
1798 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1799 return thr_arg->id.lwp () == lwp;
454296a2 1800 });
d86d4aaf
DE
1801
1802 if (thread == NULL)
1803 return NULL;
1804
9c80ecd6 1805 return get_thread_lwp (thread);
95954743
PA
1806}
1807
fa96cb38 1808/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1809
fa96cb38
PA
1810static int
1811num_lwps (int pid)
1812{
fa96cb38 1813 int count = 0;
0d62e5e8 1814
4d3bb80e
SM
1815 for_each_thread (pid, [&] (thread_info *thread)
1816 {
9c80ecd6 1817 count++;
4d3bb80e 1818 });
3aee8918 1819
fa96cb38
PA
1820 return count;
1821}
d61ddec4 1822
6d4ee8c6
GB
1823/* See nat/linux-nat.h. */
1824
1825struct lwp_info *
1826iterate_over_lwps (ptid_t filter,
d3a70e03 1827 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1828{
da4ae14a 1829 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1830 {
da4ae14a 1831 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1832
d3a70e03 1833 return callback (lwp);
6d1e5673 1834 });
6d4ee8c6 1835
9c80ecd6 1836 if (thread == NULL)
6d4ee8c6
GB
1837 return NULL;
1838
9c80ecd6 1839 return get_thread_lwp (thread);
6d4ee8c6
GB
1840}
1841
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}
c3adc08c 1903
a1385b7b
SM
1904/* Callback for `find_thread'. Returns the first LWP that is not
1905 stopped. */
d50171e4 1906
a1385b7b
SM
1907static bool
1908not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1909{
a1385b7b
SM
1910 if (!thread->id.matches (filter))
1911 return false;
47c0c975 1912
a1385b7b 1913 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1914
a1385b7b 1915 return !lwp->stopped;
0d62e5e8 1916}
611cb4a5 1917
863d01bd
PA
1918/* Increment LWP's suspend count. */
1919
1920static void
1921lwp_suspended_inc (struct lwp_info *lwp)
1922{
1923 lwp->suspended++;
1924
1925 if (debug_threads && lwp->suspended > 4)
1926 {
1927 struct thread_info *thread = get_lwp_thread (lwp);
1928
1929 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1930 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1931 }
1932}
1933
1934/* Decrement LWP's suspend count. */
1935
1936static void
1937lwp_suspended_decr (struct lwp_info *lwp)
1938{
1939 lwp->suspended--;
1940
1941 if (lwp->suspended < 0)
1942 {
1943 struct thread_info *thread = get_lwp_thread (lwp);
1944
1945 internal_error (__FILE__, __LINE__,
1946 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1947 lwp->suspended);
1948 }
1949}
1950
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the increment above; the count must be back to zero.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
2000
229d26fc
SM
2001/* Convenience wrapper. Returns information about LWP's fast tracepoint
2002 collection status. */
fa593d66 2003
229d26fc 2004static fast_tpoint_collect_result
fa593d66
PA
2005linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2006 struct fast_tpoint_collect_status *status)
2007{
2008 CORE_ADDR thread_area;
d86d4aaf 2009 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2010
2011 if (the_low_target.get_thread_area == NULL)
229d26fc 2012 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2013
2014 /* Get the thread area address. This is used to recognize which
2015 thread is which when tracing with the in-process agent library.
2016 We don't read anything from the address, and treat it as opaque;
2017 it's the address itself that we assume is unique per-thread. */
d86d4aaf 2018 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
229d26fc 2019 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2020
2021 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2022}
2023
d16f3f6c
TBA
/* Check whether LWP stopped midway through a fast tracepoint jump pad
   and, if so, arrange for it to finish the collection and exit the pad
   before the stop (WSTAT, may be NULL) is reported to GDB.  Returns
   true if the event should be deferred (LWP is being moved out of the
   jump pad), false if it can be reported now.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  /* Temporarily make LWP the current thread; restored on all exits.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the PC to the tracepoint address so GDB sees
		 the stop at the tracepoint, not inside the pad.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      /* Removing a breakpoint requires all threads paused.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return false;
}
2147
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Push a new node onto the head of the singly-linked list (the list
     is newest-first; dequeue walks to the tail for FIFO order).  */
  p_sig = XCNEW (struct pending_signals);
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);

  /* Save the full siginfo so it can be restored with
     PTRACE_SETSIGINFO when the signal is finally reported.  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
2206
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Returns 1 and stores the wait status in *WSTAT
   if a deferred signal was dequeued, 0 if the list was empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* The list is newest-first, so walk to the tail to dequeue the
	 oldest deferred signal (FIFO order).  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      /* Restore the saved siginfo, if one was captured at enqueue
	 time (si_signo == 0 means none was).  */
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
2252
582511be
PA
2253/* Fetch the possibly triggered data watchpoint info and store it in
2254 CHILD.
d50171e4 2255
582511be
PA
2256 On some archs, like x86, that use debug registers to set
2257 watchpoints, it's possible that the way to know which watched
2258 address trapped, is to check the register that is used to select
2259 which address to watch. Problem is, between setting the watchpoint
2260 and reading back which data address trapped, the user may change
2261 the set of watchpoints, and, as a consequence, GDB changes the
2262 debug registers in the inferior. To avoid reading back a stale
2263 stopped-data-address when that happens, we cache in LP the fact
2264 that a watchpoint trapped, and the corresponding data address, as
2265 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2266 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2267
582511be
PA
2268static int
2269check_stopped_by_watchpoint (struct lwp_info *child)
2270{
2271 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2272 {
582511be 2273 struct thread_info *saved_thread;
d50171e4 2274
582511be
PA
2275 saved_thread = current_thread;
2276 current_thread = get_lwp_thread (child);
2277
2278 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2279 {
15c66dd6 2280 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2281
2282 if (the_low_target.stopped_data_address != NULL)
2283 child->stopped_data_address
2284 = the_low_target.stopped_data_address ();
2285 else
2286 child->stopped_data_address = 0;
d50171e4
PA
2287 }
2288
0bfdf32f 2289 current_thread = saved_thread;
d50171e4
PA
2290 }
2291
15c66dd6 2292 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2293}
2294
de0d863e
DB
2295/* Return the ptrace options that we want to try to enable. */
2296
2297static int
2298linux_low_ptrace_options (int attached)
2299{
c12a5089 2300 client_state &cs = get_client_state ();
de0d863e
DB
2301 int options = 0;
2302
2303 if (!attached)
2304 options |= PTRACE_O_EXITKILL;
2305
c12a5089 2306 if (cs.report_fork_events)
de0d863e
DB
2307 options |= PTRACE_O_TRACEFORK;
2308
c12a5089 2309 if (cs.report_vfork_events)
c269dbdb
DB
2310 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2311
c12a5089 2312 if (cs.report_exec_events)
94585166
DB
2313 options |= PTRACE_O_TRACEEXEC;
2314
82075af2
JS
2315 options |= PTRACE_O_TRACESYSGOOD;
2316
de0d863e
DB
2317 return options;
2318}
2319
d16f3f6c
TBA
/* Process one wait status WSTAT reported by waitpid for LWPID.
   Returns the LWP the event should be left pending on, or NULL if the
   event was filtered out (ignored, handled internally, or the LWP was
   resumed/deleted).  */

lwp_info *
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_t (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2529
f79b145d
YQ
2530/* Return true if THREAD is doing hardware single step. */
2531
2532static int
2533maybe_hw_step (struct thread_info *thread)
2534{
2535 if (can_hardware_single_step ())
2536 return 1;
2537 else
2538 {
3b9a79ef 2539 /* GDBserver must insert single-step breakpoint for software
f79b145d 2540 single step. */
3b9a79ef 2541 gdb_assert (has_single_step_breakpoints (thread));
f79b145d
YQ
2542 return 0;
2543 }
2544}
2545
20ba1ce6
PA
2546/* Resume LWPs that are currently stopped without any pending status
2547 to report, but are resumed from the core's perspective. */
2548
2549static void
9c80ecd6 2550resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2551{
20ba1ce6
PA
2552 struct lwp_info *lp = get_thread_lwp (thread);
2553
2554 if (lp->stopped
863d01bd 2555 && !lp->suspended
20ba1ce6 2556 && !lp->status_pending_p
20ba1ce6
PA
2557 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2558 {
8901d193
YQ
2559 int step = 0;
2560
2561 if (thread->last_resume_kind == resume_step)
2562 step = maybe_hw_step (thread);
20ba1ce6
PA
2563
2564 if (debug_threads)
2565 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2566 target_pid_to_str (ptid_of (thread)),
2567 paddress (lp->stop_pc),
2568 step);
2569
2570 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2571 }
2572}
2573
d16f3f6c
TBA
/* Wait for an event from any LWP matching FILTER_PTID, first draining
   any already-pending statuses.  WAIT_PTID selects which LWPs count as
   "resumed" when deciding whether waiting can make progress.  Stores
   the wait status in *WSTATP and returns the event LWP's id, 0 if
   WNOHANG was set and no event was found, or -1 if there are no
   unwaited-for LWPs left.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard request: pick a pending event at random to avoid
	 starving any particular LWP.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (filter_ptid != null_ptid)
    {
      /* A specific LWP was requested.  */
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  /* Mid fast-tracepoint collection: defer the pending signal
	     and let the LWP finish getting out of the jump pad.  */
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      /* Consume the pending status and report it.  */
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	 thread group still exist, waitpid(TGID, ...) hangs.  That
	 waitpid won't return an exit status until the other threads
	 in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	 without reporting an exit (so we'd hang if we waited for it
	 explicitly in that case).  The exec event is reported to
	 the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread (resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      /* sigsuspend atomically unblocks signals and waits; SIGCHLD
	 delivery wakes us up, then we restore the mask and retry.  */
      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}
2772
d16f3f6c
TBA
/* Convenience wrapper: wait for an event from PTID, using PTID both as
   the filter and as the set of LWPs considered resumed.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2778
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2835
7984d532
PA
2836/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2837 NULL. */
2838
2839static void
2840unsuspend_all_lwps (struct lwp_info *except)
2841{
139720c5
SM
2842 for_each_thread ([&] (thread_info *thread)
2843 {
2844 lwp_info *lwp = get_thread_lwp (thread);
2845
2846 if (lwp != except)
2847 lwp_suspended_decr (lwp);
2848 });
7984d532
PA
2849}
2850
fcb056a5 2851static bool stuck_in_jump_pad_callback (thread_info *thread);
5a6b0a41 2852static bool lwp_running (thread_info *thread);
fa593d66
PA
2853
2854/* Stabilize threads (move out of jump pads).
2855
2856 If a thread is midway collecting a fast tracepoint, we need to
2857 finish the collection and move it out of the jump pad before
2858 reporting the signal.
2859
2860 This avoids recursion while collecting (when a signal arrives
2861 midway, and the signal handler itself collects), which would trash
2862 the trace buffer. In case the user set a breakpoint in a signal
2863 handler, this avoids the backtrace showing the jump pad, etc..
2864 Most importantly, there are certain things we can't do safely if
2865 threads are stopped in a jump pad (or in its callee's). For
2866 example:
2867
2868 - starting a new trace run. A thread still collecting the
2869 previous run, could trash the trace buffer when resumed. The trace
2870 buffer control structures would have been reset but the thread had
2871 no way to tell. The thread could even midway memcpy'ing to the
2872 buffer, which would mean that when resumed, it would clobber the
2873 trace buffer that had been set for a new run.
2874
2875 - we can't rewrite/reuse the jump pads for new tracepoints
2876 safely. Say you do tstart while a thread is stopped midway while
2877 collecting. When the thread is later resumed, it finishes the
2878 collection, and returns to the jump pad, to execute the original
2879 instruction that was under the tracepoint jump at the time the
2880 older run had been started. If the jump pad had been rewritten
2881 since for something else in the new run, the thread would now
2882 execute the wrong / random instructions. */
2883
5c9eb2f2
TBA
void
linux_process_target::stabilize_threads ()
{
  /* If some thread is stuck mid-collection in a jump pad (see
     stuck_in_jump_pad_callback), we can't stabilize at all — bail
     out early.  */
  thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below changes current_thread as events come in; remember
     the caller's selection so we can restore it before returning.  */
  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  (Balanced by the unsuspend_all_lwps call after
	     the loop.)  */
	  lwp_suspended_inc (lwp);

	  /* Queue the reported signal (or a stop requested via
	     resume_stop) as a deferred signal, so it isn't lost and
	     can be delivered once the LWP is resumed.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the lwp_suspended_inc done for each stopped LWP above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Sanity check: by now no thread should remain stuck in a jump
	 pad.  */
      thread_stuck = find_thread (stuck_in_jump_pad_callback);

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2950
582511be
PA
2951/* Convenience function that is called when the kernel reports an
2952 event that is not passed out to GDB. */
2953
2954static ptid_t
2955ignore_event (struct target_waitstatus *ourstatus)
2956{
2957 /* If we got an event, there may still be others, as a single
2958 SIGCHLD can indicate more than one child stopped. This forces
2959 another target_wait call. */
2960 async_file_mark ();
2961
2962 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2963 return null_ptid;
2964}
2965
65706a29
PA
2966/* Convenience function that is called when the kernel reports an exit
2967 event. This decides whether to report the event to GDB as a
2968 process exit event, a thread exit event, or to suppress the
2969 event. */
2970
2971static ptid_t
2972filter_exit_event (struct lwp_info *event_child,
2973 struct target_waitstatus *ourstatus)
2974{
c12a5089 2975 client_state &cs = get_client_state ();
65706a29
PA
2976 struct thread_info *thread = get_lwp_thread (event_child);
2977 ptid_t ptid = ptid_of (thread);
2978
2979 if (!last_thread_of_process_p (pid_of (thread)))
2980 {
c12a5089 2981 if (cs.report_thread_events)
65706a29
PA
2982 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2983 else
2984 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2985
2986 delete_lwp (event_child);
2987 }
2988 return ptid;
2989}
2990
82075af2
JS
2991/* Returns 1 if GDB is interested in any event_child syscalls. */
2992
2993static int
2994gdb_catching_syscalls_p (struct lwp_info *event_child)
2995{
2996 struct thread_info *thread = get_lwp_thread (event_child);
2997 struct process_info *proc = get_thread_process (thread);
2998
f27866ba 2999 return !proc->syscalls_to_catch.empty ();
82075af2
JS
3000}
3001
3002/* Returns 1 if GDB is interested in the event_child syscall.
3003 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3004
3005static int
3006gdb_catch_this_syscall_p (struct lwp_info *event_child)
3007{
4cc32bec 3008 int sysno;
82075af2
JS
3009 struct thread_info *thread = get_lwp_thread (event_child);
3010 struct process_info *proc = get_thread_process (thread);
3011
f27866ba 3012 if (proc->syscalls_to_catch.empty ())
82075af2
JS
3013 return 0;
3014
f27866ba 3015 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
82075af2
JS
3016 return 1;
3017
4cc32bec 3018 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
3019
3020 for (int iter : proc->syscalls_to_catch)
82075af2
JS
3021 if (iter == sysno)
3022 return 1;
3023
3024 return 0;
3025}
3026
d16f3f6c
TBA
3027ptid_t
3028linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3029 int target_options)
da6d8c04 3030{
c12a5089 3031 client_state &cs = get_client_state ();
e5f1222d 3032 int w;
fc7238bb 3033 struct lwp_info *event_child;
bd99dc85 3034 int options;
bd99dc85 3035 int pid;
6bf5e0ba
PA
3036 int step_over_finished;
3037 int bp_explains_trap;
3038 int maybe_internal_trap;
3039 int report_to_gdb;
219f2f23 3040 int trace_event;
c2d6af84 3041 int in_step_range;
f2faf941 3042 int any_resumed;
bd99dc85 3043
87ce2a04
DE
3044 if (debug_threads)
3045 {
3046 debug_enter ();
d16f3f6c 3047 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
87ce2a04
DE
3048 }
3049
bd99dc85
PA
3050 /* Translate generic target options into linux options. */
3051 options = __WALL;
3052 if (target_options & TARGET_WNOHANG)
3053 options |= WNOHANG;
0d62e5e8 3054
fa593d66
PA
3055 bp_explains_trap = 0;
3056 trace_event = 0;
c2d6af84 3057 in_step_range = 0;
bd99dc85
PA
3058 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3059
83e1b6c1
SM
3060 auto status_pending_p_any = [&] (thread_info *thread)
3061 {
3062 return status_pending_p_callback (thread, minus_one_ptid);
3063 };
3064
a1385b7b
SM
3065 auto not_stopped = [&] (thread_info *thread)
3066 {
3067 return not_stopped_callback (thread, minus_one_ptid);
3068 };
3069
f2faf941 3070 /* Find a resumed LWP, if any. */
83e1b6c1 3071 if (find_thread (status_pending_p_any) != NULL)
f2faf941 3072 any_resumed = 1;
a1385b7b 3073 else if (find_thread (not_stopped) != NULL)
f2faf941
PA
3074 any_resumed = 1;
3075 else
3076 any_resumed = 0;
3077
d7e15655 3078 if (step_over_bkpt == null_ptid)
d16f3f6c 3079 pid = wait_for_event (ptid, &w, options);
6bf5e0ba
PA
3080 else
3081 {
3082 if (debug_threads)
87ce2a04
DE
3083 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3084 target_pid_to_str (step_over_bkpt));
d16f3f6c 3085 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
6bf5e0ba
PA
3086 }
3087
f2faf941 3088 if (pid == 0 || (pid == -1 && !any_resumed))
87ce2a04 3089 {
fa96cb38
PA
3090 gdb_assert (target_options & TARGET_WNOHANG);
3091
87ce2a04
DE
3092 if (debug_threads)
3093 {
d16f3f6c 3094 debug_printf ("wait_1 ret = null_ptid, "
fa96cb38 3095 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
3096 debug_exit ();
3097 }
fa96cb38
PA
3098
3099 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
3100 return null_ptid;
3101 }
fa96cb38
PA
3102 else if (pid == -1)
3103 {
3104 if (debug_threads)
3105 {
d16f3f6c 3106 debug_printf ("wait_1 ret = null_ptid, "
fa96cb38
PA
3107 "TARGET_WAITKIND_NO_RESUMED\n");
3108 debug_exit ();
3109 }
bd99dc85 3110
fa96cb38
PA
3111 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3112 return null_ptid;
3113 }
0d62e5e8 3114
0bfdf32f 3115 event_child = get_thread_lwp (current_thread);
0d62e5e8 3116
d16f3f6c 3117 /* wait_for_event only returns an exit status for the last
fa96cb38
PA
3118 child of a process. Report it. */
3119 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3120 {
fa96cb38 3121 if (WIFEXITED (w))
0d62e5e8 3122 {
fa96cb38
PA
3123 ourstatus->kind = TARGET_WAITKIND_EXITED;
3124 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 3125
fa96cb38 3126 if (debug_threads)
bd99dc85 3127 {
d16f3f6c 3128 debug_printf ("wait_1 ret = %s, exited with "
fa96cb38 3129 "retcode %d\n",
0bfdf32f 3130 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3131 WEXITSTATUS (w));
3132 debug_exit ();
bd99dc85 3133 }
fa96cb38
PA
3134 }
3135 else
3136 {
3137 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3138 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 3139
fa96cb38
PA
3140 if (debug_threads)
3141 {
d16f3f6c 3142 debug_printf ("wait_1 ret = %s, terminated with "
fa96cb38 3143 "signal %d\n",
0bfdf32f 3144 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3145 WTERMSIG (w));
3146 debug_exit ();
3147 }
0d62e5e8 3148 }
fa96cb38 3149
65706a29
PA
3150 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3151 return filter_exit_event (event_child, ourstatus);
3152
0bfdf32f 3153 return ptid_of (current_thread);
da6d8c04
DJ
3154 }
3155
2d97cd35
AT
3156 /* If step-over executes a breakpoint instruction, in the case of a
3157 hardware single step it means a gdb/gdbserver breakpoint had been
3158 planted on top of a permanent breakpoint, in the case of a software
3159 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3160 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3161 the breakpoint address.
3162 So in the case of the hardware single step advance the PC manually
3163 past the breakpoint and in the case of software single step advance only
3b9a79ef 3164 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3165 This avoids that a program would keep trapping a permanent breakpoint
3166 forever. */
d7e15655 3167 if (step_over_bkpt != null_ptid
2d97cd35
AT
3168 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3169 && (event_child->stepping
3b9a79ef 3170 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3171 {
dd373349
AT
3172 int increment_pc = 0;
3173 int breakpoint_kind = 0;
3174 CORE_ADDR stop_pc = event_child->stop_pc;
3175
d16f3f6c
TBA
3176 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3177 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2
PA
3178
3179 if (debug_threads)
3180 {
3181 debug_printf ("step-over for %s executed software breakpoint\n",
3182 target_pid_to_str (ptid_of (current_thread)));
3183 }
3184
3185 if (increment_pc != 0)
3186 {
3187 struct regcache *regcache
3188 = get_thread_regcache (current_thread, 1);
3189
3190 event_child->stop_pc += increment_pc;
3191 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3192
3193 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 3194 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3195 }
3196 }
3197
6bf5e0ba
PA
3198 /* If this event was not handled before, and is not a SIGTRAP, we
3199 report it. SIGILL and SIGSEGV are also treated as traps in case
3200 a breakpoint is inserted at the current PC. If this target does
3201 not support internal breakpoints at all, we also report the
3202 SIGTRAP without further processing; it's of no concern to us. */
3203 maybe_internal_trap
3204 = (supports_breakpoints ()
3205 && (WSTOPSIG (w) == SIGTRAP
3206 || ((WSTOPSIG (w) == SIGILL
3207 || WSTOPSIG (w) == SIGSEGV)
3208 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3209
3210 if (maybe_internal_trap)
3211 {
3212 /* Handle anything that requires bookkeeping before deciding to
3213 report the event or continue waiting. */
3214
3215 /* First check if we can explain the SIGTRAP with an internal
3216 breakpoint, or if we should possibly report the event to GDB.
3217 Do this before anything that may remove or insert a
3218 breakpoint. */
3219 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3220
3221 /* We have a SIGTRAP, possibly a step-over dance has just
3222 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3223 reinsert breakpoints and delete any single-step
3224 breakpoints. */
6bf5e0ba
PA
3225 step_over_finished = finish_step_over (event_child);
3226
3227 /* Now invoke the callbacks of any internal breakpoints there. */
3228 check_breakpoints (event_child->stop_pc);
3229
219f2f23
PA
3230 /* Handle tracepoint data collecting. This may overflow the
3231 trace buffer, and cause a tracing stop, removing
3232 breakpoints. */
3233 trace_event = handle_tracepoints (event_child);
3234
6bf5e0ba
PA
3235 if (bp_explains_trap)
3236 {
6bf5e0ba 3237 if (debug_threads)
87ce2a04 3238 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba
PA
3239 }
3240 }
3241 else
3242 {
3243 /* We have some other signal, possibly a step-over dance was in
3244 progress, and it should be cancelled too. */
3245 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3246 }
3247
3248 /* We have all the data we need. Either report the event to GDB, or
3249 resume threads and keep waiting for more. */
3250
3251 /* If we're collecting a fast tracepoint, finish the collection and
3252 move out of the jump pad before delivering a signal. See
3253 linux_stabilize_threads. */
3254
3255 if (WIFSTOPPED (w)
3256 && WSTOPSIG (w) != SIGTRAP
3257 && supports_fast_tracepoints ()
58b4daa5 3258 && agent_loaded_p ())
fa593d66
PA
3259 {
3260 if (debug_threads)
87ce2a04
DE
3261 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3262 "to defer or adjust it.\n",
0bfdf32f 3263 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3264
3265 /* Allow debugging the jump pad itself. */
0bfdf32f 3266 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3267 && maybe_move_out_of_jump_pad (event_child, &w))
3268 {
3269 enqueue_one_deferred_signal (event_child, &w);
3270
3271 if (debug_threads)
87ce2a04 3272 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 3273 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3274
3275 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be 3276
edeeb602
YQ
3277 if (debug_threads)
3278 debug_exit ();
582511be 3279 return ignore_event (ourstatus);
fa593d66
PA
3280 }
3281 }
219f2f23 3282
229d26fc
SM
3283 if (event_child->collecting_fast_tracepoint
3284 != fast_tpoint_collect_result::not_collecting)
fa593d66
PA
3285 {
3286 if (debug_threads)
87ce2a04
DE
3287 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3288 "Check if we're already there.\n",
0bfdf32f 3289 lwpid_of (current_thread),
229d26fc 3290 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3291
3292 trace_event = 1;
3293
3294 event_child->collecting_fast_tracepoint
3295 = linux_fast_tracepoint_collecting (event_child, NULL);
3296
229d26fc
SM
3297 if (event_child->collecting_fast_tracepoint
3298 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3299 {
3300 /* No longer need this breakpoint. */
3301 if (event_child->exit_jump_pad_bkpt != NULL)
3302 {
3303 if (debug_threads)
87ce2a04
DE
3304 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3305 "stopping all threads momentarily.\n");
fa593d66
PA
3306
3307 /* Other running threads could hit this breakpoint.
3308 We don't handle moribund locations like GDB does,
3309 instead we always pause all threads when removing
3310 breakpoints, so that any step-over or
3311 decr_pc_after_break adjustment is always taken
3312 care of while the breakpoint is still
3313 inserted. */
3314 stop_all_lwps (1, event_child);
fa593d66
PA
3315
3316 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3317 event_child->exit_jump_pad_bkpt = NULL;
3318
3319 unstop_all_lwps (1, event_child);
3320
3321 gdb_assert (event_child->suspended >= 0);
3322 }
3323 }
3324
229d26fc
SM
3325 if (event_child->collecting_fast_tracepoint
3326 == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
3327 {
3328 if (debug_threads)
87ce2a04
DE
3329 debug_printf ("fast tracepoint finished "
3330 "collecting successfully.\n");
fa593d66
PA
3331
3332 /* We may have a deferred signal to report. */
3333 if (dequeue_one_deferred_signal (event_child, &w))
3334 {
3335 if (debug_threads)
87ce2a04 3336 debug_printf ("dequeued one signal.\n");
fa593d66 3337 }
3c11dd79 3338 else
fa593d66 3339 {
3c11dd79 3340 if (debug_threads)
87ce2a04 3341 debug_printf ("no deferred signals.\n");
fa593d66
PA
3342
3343 if (stabilizing_threads)
3344 {
3345 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3346 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3347
3348 if (debug_threads)
3349 {
d16f3f6c 3350 debug_printf ("wait_1 ret = %s, stopped "
87ce2a04 3351 "while stabilizing threads\n",
0bfdf32f 3352 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3353 debug_exit ();
3354 }
3355
0bfdf32f 3356 return ptid_of (current_thread);
fa593d66
PA
3357 }
3358 }
3359 }
6bf5e0ba
PA
3360 }
3361
e471f25b
PA
3362 /* Check whether GDB would be interested in this event. */
3363
82075af2
JS
3364 /* Check if GDB is interested in this syscall. */
3365 if (WIFSTOPPED (w)
3366 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3367 && !gdb_catch_this_syscall_p (event_child))
3368 {
3369 if (debug_threads)
3370 {
3371 debug_printf ("Ignored syscall for LWP %ld.\n",
3372 lwpid_of (current_thread));
3373 }
3374
3375 linux_resume_one_lwp (event_child, event_child->stepping,
3376 0, NULL);
edeeb602
YQ
3377
3378 if (debug_threads)
3379 debug_exit ();
82075af2
JS
3380 return ignore_event (ourstatus);
3381 }
3382
e471f25b
PA
3383 /* If GDB is not interested in this signal, don't stop other
3384 threads, and don't report it to GDB. Just resume the inferior
3385 right away. We do this for threading-related signals as well as
3386 any that GDB specifically requested we ignore. But never ignore
3387 SIGSTOP if we sent it ourselves, and do not ignore signals when
3388 stepping - they may require special handling to skip the signal
c9587f88
AT
3389 handler. Also never ignore signals that could be caused by a
3390 breakpoint. */
e471f25b 3391 if (WIFSTOPPED (w)
0bfdf32f 3392 && current_thread->last_resume_kind != resume_step
e471f25b 3393 && (
1a981360 3394#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3395 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3396 && (WSTOPSIG (w) == __SIGRTMIN
3397 || WSTOPSIG (w) == __SIGRTMIN + 1))
3398 ||
3399#endif
c12a5089 3400 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3401 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3402 && current_thread->last_resume_kind == resume_stop)
3403 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3404 {
3405 siginfo_t info, *info_p;
3406
3407 if (debug_threads)
87ce2a04 3408 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3409 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3410
0bfdf32f 3411 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3412 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3413 info_p = &info;
3414 else
3415 info_p = NULL;
863d01bd
PA
3416
3417 if (step_over_finished)
3418 {
3419 /* We cancelled this thread's step-over above. We still
3420 need to unsuspend all other LWPs, and set them back
3421 running again while the signal handler runs. */
3422 unsuspend_all_lwps (event_child);
3423
3424 /* Enqueue the pending signal info so that proceed_all_lwps
3425 doesn't lose it. */
3426 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3427
3428 proceed_all_lwps ();
3429 }
3430 else
3431 {
3432 linux_resume_one_lwp (event_child, event_child->stepping,
3433 WSTOPSIG (w), info_p);
3434 }
edeeb602
YQ
3435
3436 if (debug_threads)
3437 debug_exit ();
3438
582511be 3439 return ignore_event (ourstatus);
e471f25b
PA
3440 }
3441
c2d6af84
PA
3442 /* Note that all addresses are always "out of the step range" when
3443 there's no range to begin with. */
3444 in_step_range = lwp_in_step_range (event_child);
3445
3446 /* If GDB wanted this thread to single step, and the thread is out
3447 of the step range, we always want to report the SIGTRAP, and let
3448 GDB handle it. Watchpoints should always be reported. So should
3449 signals we can't explain. A SIGTRAP we can't explain could be a
3450 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3451 do, we're be able to handle GDB breakpoints on top of internal
3452 breakpoints, by handling the internal breakpoint and still
3453 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3454 won't see the breakpoint hit. If we see a single-step event but
3455 the thread should be continuing, don't pass the trap to gdb.
3456 That indicates that we had previously finished a single-step but
3457 left the single-step pending -- see
3458 complete_ongoing_step_over. */
6bf5e0ba 3459 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3460 || (current_thread->last_resume_kind == resume_step
c2d6af84 3461 && !in_step_range)
15c66dd6 3462 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3463 || (!in_step_range
3464 && !bp_explains_trap
3465 && !trace_event
3466 && !step_over_finished
3467 && !(current_thread->last_resume_kind == resume_continue
3468 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3469 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3470 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3471 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3472 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3473
3474 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3475
3476 /* We found no reason GDB would want us to stop. We either hit one
3477 of our own breakpoints, or finished an internal step GDB
3478 shouldn't know about. */
3479 if (!report_to_gdb)
3480 {
3481 if (debug_threads)
3482 {
3483 if (bp_explains_trap)
87ce2a04 3484 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3485 if (step_over_finished)
87ce2a04 3486 debug_printf ("Step-over finished.\n");
219f2f23 3487 if (trace_event)
87ce2a04 3488 debug_printf ("Tracepoint event.\n");
c2d6af84 3489 if (lwp_in_step_range (event_child))
87ce2a04
DE
3490 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3491 paddress (event_child->stop_pc),
3492 paddress (event_child->step_range_start),
3493 paddress (event_child->step_range_end));
6bf5e0ba
PA
3494 }
3495
3496 /* We're not reporting this breakpoint to GDB, so apply the
3497 decr_pc_after_break adjustment to the inferior's regcache
3498 ourselves. */
3499
3500 if (the_low_target.set_pc != NULL)
3501 {
3502 struct regcache *regcache
0bfdf32f 3503 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3504 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3505 }
3506
7984d532 3507 if (step_over_finished)
e3652c84
YQ
3508 {
3509 /* If we have finished stepping over a breakpoint, we've
3510 stopped and suspended all LWPs momentarily except the
3511 stepping one. This is where we resume them all again.
3512 We're going to keep waiting, so use proceed, which
3513 handles stepping over the next breakpoint. */
3514 unsuspend_all_lwps (event_child);
3515 }
3516 else
3517 {
3518 /* Remove the single-step breakpoints if any. Note that
3519 there isn't single-step breakpoint if we finished stepping
3520 over. */
3521 if (can_software_single_step ()
3522 && has_single_step_breakpoints (current_thread))
3523 {
3524 stop_all_lwps (0, event_child);
3525 delete_single_step_breakpoints (current_thread);
3526 unstop_all_lwps (0, event_child);
3527 }
3528 }
7984d532 3529
e3652c84
YQ
3530 if (debug_threads)
3531 debug_printf ("proceeding all threads.\n");
6bf5e0ba 3532 proceed_all_lwps ();
edeeb602
YQ
3533
3534 if (debug_threads)
3535 debug_exit ();
3536
582511be 3537 return ignore_event (ourstatus);
6bf5e0ba
PA
3538 }
3539
3540 if (debug_threads)
3541 {
00db26fa 3542 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30 3543 {
23fdd69e
SM
3544 std::string str
3545 = target_waitstatus_to_string (&event_child->waitstatus);
ad071a30 3546
ad071a30 3547 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
23fdd69e 3548 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
ad071a30 3549 }
0bfdf32f 3550 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3551 {
3552 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3553 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3554 else if (!lwp_in_step_range (event_child))
87ce2a04 3555 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3556 }
15c66dd6 3557 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3558 debug_printf ("Stopped by watchpoint.\n");
582511be 3559 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3560 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3561 if (debug_threads)
87ce2a04 3562 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3563 }
3564
3565 /* Alright, we're going to report a stop. */
3566
3b9a79ef 3567 /* Remove single-step breakpoints. */
8901d193
YQ
3568 if (can_software_single_step ())
3569 {
3b9a79ef 3570 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3571 lwps, so that other threads won't hit the breakpoint in the
3572 staled memory. */
3b9a79ef 3573 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3574
3575 if (non_stop)
3576 {
3b9a79ef
YQ
3577 remove_single_step_breakpoints_p
3578 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3579 }
3580 else
3581 {
3582 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3583 requests. Delete all single-step breakpoints. */
8901d193 3584
9c80ecd6
SM
3585 find_thread ([&] (thread_info *thread) {
3586 if (has_single_step_breakpoints (thread))
3587 {
3588 remove_single_step_breakpoints_p = 1;
3589 return true;
3590 }
8901d193 3591
9c80ecd6
SM
3592 return false;
3593 });
8901d193
YQ
3594 }
3595
3b9a79ef 3596 if (remove_single_step_breakpoints_p)
8901d193 3597 {
3b9a79ef 3598 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3599 so that other threads won't hit the breakpoint in the staled
3600 memory. */
3601 stop_all_lwps (0, event_child);
3602
3603 if (non_stop)
3604 {
3b9a79ef
YQ
3605 gdb_assert (has_single_step_breakpoints (current_thread));
3606 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3607 }
3608 else
3609 {
9c80ecd6
SM
3610 for_each_thread ([] (thread_info *thread){
3611 if (has_single_step_breakpoints (thread))
3612 delete_single_step_breakpoints (thread);
3613 });
8901d193
YQ
3614 }
3615
3616 unstop_all_lwps (0, event_child);
3617 }
3618 }
3619
582511be 3620 if (!stabilizing_threads)
6bf5e0ba
PA
3621 {
3622 /* In all-stop, stop all threads. */
582511be
PA
3623 if (!non_stop)
3624 stop_all_lwps (0, NULL);
6bf5e0ba 3625
c03e6ccc 3626 if (step_over_finished)
582511be
PA
3627 {
3628 if (!non_stop)
3629 {
3630 /* If we were doing a step-over, all other threads but
3631 the stepping one had been paused in start_step_over,
3632 with their suspend counts incremented. We don't want
3633 to do a full unstop/unpause, because we're in
3634 all-stop mode (so we want threads stopped), but we
3635 still need to unsuspend the other threads, to
3636 decrement their `suspended' count back. */
3637 unsuspend_all_lwps (event_child);
3638 }
3639 else
3640 {
3641 /* If we just finished a step-over, then all threads had
3642 been momentarily paused. In all-stop, that's fine,
3643 we want threads stopped by now anyway. In non-stop,
3644 we need to re-resume threads that GDB wanted to be
3645 running. */
3646 unstop_all_lwps (1, event_child);
3647 }
3648 }
c03e6ccc 3649
3aa5cfa0
AT
3650 /* If we're not waiting for a specific LWP, choose an event LWP
3651 from among those that have had events. Giving equal priority
3652 to all LWPs that have had events helps prevent
3653 starvation. */
d7e15655 3654 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3655 {
3656 event_child->status_pending_p = 1;
3657 event_child->status_pending = w;
3658
3659 select_event_lwp (&event_child);
3660
3661 /* current_thread and event_child must stay in sync. */
3662 current_thread = get_lwp_thread (event_child);
3663
3664 event_child->status_pending_p = 0;
3665 w = event_child->status_pending;
3666 }
3667
3668
fa593d66 3669 /* Stabilize threads (move out of jump pads). */
582511be 3670 if (!non_stop)
5c9eb2f2 3671 target_stabilize_threads ();
6bf5e0ba
PA
3672 }
3673 else
3674 {
3675 /* If we just finished a step-over, then all threads had been
3676 momentarily paused. In all-stop, that's fine, we want
3677 threads stopped by now anyway. In non-stop, we need to
3678 re-resume threads that GDB wanted to be running. */
3679 if (step_over_finished)
7984d532 3680 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3681 }
3682
00db26fa 3683 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3684 {
00db26fa
PA
3685 /* If the reported event is an exit, fork, vfork or exec, let
3686 GDB know. */
5a04c4cf
PA
3687
3688 /* Break the unreported fork relationship chain. */
3689 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3690 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3691 {
3692 event_child->fork_relative->fork_relative = NULL;
3693 event_child->fork_relative = NULL;
3694 }
3695
00db26fa 3696 *ourstatus = event_child->waitstatus;
de0d863e
DB
3697 /* Clear the event lwp's waitstatus since we handled it already. */
3698 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3699 }
3700 else
3701 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3702
582511be 3703 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3704 it was a software breakpoint, and the client doesn't know we can
3705 adjust the breakpoint ourselves. */
3706 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3707 && !cs.swbreak_feature)
582511be
PA
3708 {
3709 int decr_pc = the_low_target.decr_pc_after_break;
3710
3711 if (decr_pc != 0)
3712 {
3713 struct regcache *regcache
3714 = get_thread_regcache (current_thread, 1);
3715 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3716 }
3717 }
3718
82075af2
JS
3719 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3720 {
82075af2 3721 get_syscall_trapinfo (event_child,
4cc32bec 3722 &ourstatus->value.syscall_number);
82075af2
JS
3723 ourstatus->kind = event_child->syscall_state;
3724 }
3725 else if (current_thread->last_resume_kind == resume_stop
3726 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3727 {
3728 /* A thread that has been requested to stop by GDB with vCont;t,
3729 and it stopped cleanly, so report as SIG0. The use of
3730 SIGSTOP is an implementation detail. */
a493e3e2 3731 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3732 }
0bfdf32f 3733 else if (current_thread->last_resume_kind == resume_stop
8336d594 3734 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3735 {
3736 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3737 but, it stopped for other reasons. */
2ea28649 3738 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3739 }
de0d863e 3740 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3741 {
2ea28649 3742 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3743 }
3744
d7e15655 3745 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3746
bd99dc85 3747 if (debug_threads)
87ce2a04 3748 {
d16f3f6c 3749 debug_printf ("wait_1 ret = %s, %d, %d\n",
0bfdf32f 3750 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3751 ourstatus->kind, ourstatus->value.sig);
3752 debug_exit ();
3753 }
bd99dc85 3754
65706a29
PA
3755 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3756 return filter_exit_event (event_child, ourstatus);
3757
0bfdf32f 3758 return ptid_of (current_thread);
bd99dc85
PA
3759}
3760
3761/* Get rid of any pending event in the pipe. */
3762static void
3763async_file_flush (void)
3764{
3765 int ret;
3766 char buf;
3767
3768 do
3769 ret = read (linux_event_pipe[0], &buf, 1);
3770 while (ret >= 0 || (ret == -1 && errno == EINTR));
3771}
3772
3773/* Put something in the pipe, so the event loop wakes up. */
3774static void
3775async_file_mark (void)
3776{
3777 int ret;
3778
3779 async_file_flush ();
3780
3781 do
3782 ret = write (linux_event_pipe[1], "+", 1);
3783 while (ret == 0 || (ret == -1 && errno == EINTR));
3784
3785 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3786 be awakened anyway. */
3787}
3788
6532e7e3
TBA
3789ptid_t
3790linux_process_target::wait (ptid_t ptid,
3791 target_waitstatus *ourstatus,
3792 int target_options)
bd99dc85 3793{
95954743 3794 ptid_t event_ptid;
bd99dc85 3795
bd99dc85
PA
3796 /* Flush the async file first. */
3797 if (target_is_async_p ())
3798 async_file_flush ();
3799
582511be
PA
3800 do
3801 {
d16f3f6c 3802 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3803 }
3804 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3805 && event_ptid == null_ptid
582511be 3806 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3807
3808 /* If at least one stop was reported, there may be more. A single
3809 SIGCHLD can signal more than one child stop. */
3810 if (target_is_async_p ()
3811 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3812 && event_ptid != null_ptid)
bd99dc85
PA
3813 async_file_mark ();
3814
3815 return event_ptid;
da6d8c04
DJ
3816}
3817
c5f62d5f 3818/* Send a signal to an LWP. */
fd500816
DJ
3819
3820static int
a1928bad 3821kill_lwp (unsigned long lwpid, int signo)
fd500816 3822{
4a6ed09b 3823 int ret;
fd500816 3824
4a6ed09b
PA
3825 errno = 0;
3826 ret = syscall (__NR_tkill, lwpid, signo);
3827 if (errno == ENOSYS)
3828 {
3829 /* If tkill fails, then we are not using nptl threads, a
3830 configuration we no longer support. */
3831 perror_with_name (("tkill"));
3832 }
3833 return ret;
fd500816
DJ
3834}
3835
964e4306
PA
3836void
3837linux_stop_lwp (struct lwp_info *lwp)
3838{
3839 send_sigstop (lwp);
3840}
3841
0d62e5e8 3842static void
02fc4de7 3843send_sigstop (struct lwp_info *lwp)
0d62e5e8 3844{
bd99dc85 3845 int pid;
0d62e5e8 3846
d86d4aaf 3847 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3848
0d62e5e8
DJ
3849 /* If we already have a pending stop signal for this process, don't
3850 send another. */
54a0b537 3851 if (lwp->stop_expected)
0d62e5e8 3852 {
ae13219e 3853 if (debug_threads)
87ce2a04 3854 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3855
0d62e5e8
DJ
3856 return;
3857 }
3858
3859 if (debug_threads)
87ce2a04 3860 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3861
d50171e4 3862 lwp->stop_expected = 1;
bd99dc85 3863 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3864}
3865
df3e4dbe
SM
3866static void
3867send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3868{
d86d4aaf 3869 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3870
7984d532
PA
3871 /* Ignore EXCEPT. */
3872 if (lwp == except)
df3e4dbe 3873 return;
7984d532 3874
02fc4de7 3875 if (lwp->stopped)
df3e4dbe 3876 return;
02fc4de7
PA
3877
3878 send_sigstop (lwp);
7984d532
PA
3879}
3880
3881/* Increment the suspend count of an LWP, and stop it, if not stopped
3882 yet. */
df3e4dbe
SM
3883static void
3884suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3885{
d86d4aaf 3886 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3887
3888 /* Ignore EXCEPT. */
3889 if (lwp == except)
df3e4dbe 3890 return;
7984d532 3891
863d01bd 3892 lwp_suspended_inc (lwp);
7984d532 3893
df3e4dbe 3894 send_sigstop (thread, except);
02fc4de7
PA
3895}
3896
95954743
PA
3897static void
3898mark_lwp_dead (struct lwp_info *lwp, int wstat)
3899{
95954743
PA
3900 /* Store the exit status for later. */
3901 lwp->status_pending_p = 1;
3902 lwp->status_pending = wstat;
3903
00db26fa
PA
3904 /* Store in waitstatus as well, as there's nothing else to process
3905 for this event. */
3906 if (WIFEXITED (wstat))
3907 {
3908 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3909 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3910 }
3911 else if (WIFSIGNALED (wstat))
3912 {
3913 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3914 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3915 }
3916
95954743
PA
3917 /* Prevent trying to stop it. */
3918 lwp->stopped = 1;
3919
3920 /* No further stops are expected from a dead lwp. */
3921 lwp->stop_expected = 0;
3922}
3923
00db26fa
PA
3924/* Return true if LWP has exited already, and has a pending exit event
3925 to report to GDB. */
3926
3927static int
3928lwp_is_marked_dead (struct lwp_info *lwp)
3929{
3930 return (lwp->status_pending_p
3931 && (WIFEXITED (lwp->status_pending)
3932 || WIFSIGNALED (lwp->status_pending)));
3933}
3934
d16f3f6c
TBA
3935void
3936linux_process_target::wait_for_sigstop ()
0d62e5e8 3937{
0bfdf32f 3938 struct thread_info *saved_thread;
95954743 3939 ptid_t saved_tid;
fa96cb38
PA
3940 int wstat;
3941 int ret;
0d62e5e8 3942
0bfdf32f
GB
3943 saved_thread = current_thread;
3944 if (saved_thread != NULL)
9c80ecd6 3945 saved_tid = saved_thread->id;
bd99dc85 3946 else
95954743 3947 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3948
d50171e4 3949 if (debug_threads)
fa96cb38 3950 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3951
fa96cb38
PA
3952 /* Passing NULL_PTID as filter indicates we want all events to be
3953 left pending. Eventually this returns when there are no
3954 unwaited-for children left. */
d16f3f6c 3955 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3956 gdb_assert (ret == -1);
0d62e5e8 3957
13d3d99b 3958 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 3959 current_thread = saved_thread;
0d62e5e8
DJ
3960 else
3961 {
3962 if (debug_threads)
87ce2a04 3963 debug_printf ("Previously current thread died.\n");
0d62e5e8 3964
f0db101d
PA
3965 /* We can't change the current inferior behind GDB's back,
3966 otherwise, a subsequent command may apply to the wrong
3967 process. */
3968 current_thread = NULL;
0d62e5e8
DJ
3969 }
3970}
3971
fcb056a5 3972/* Returns true if THREAD is stopped in a jump pad, and we can't
fa593d66
PA
3973 move it out, because we need to report the stop event to GDB. For
3974 example, if the user puts a breakpoint in the jump pad, it's
3975 because she wants to debug it. */
3976
fcb056a5
SM
3977static bool
3978stuck_in_jump_pad_callback (thread_info *thread)
fa593d66 3979{
d86d4aaf 3980 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3981
863d01bd
PA
3982 if (lwp->suspended != 0)
3983 {
3984 internal_error (__FILE__, __LINE__,
3985 "LWP %ld is suspended, suspended=%d\n",
3986 lwpid_of (thread), lwp->suspended);
3987 }
fa593d66
PA
3988 gdb_assert (lwp->stopped);
3989
3990 /* Allow debugging the jump pad, gdb_collect, etc.. */
3991 return (supports_fast_tracepoints ()
58b4daa5 3992 && agent_loaded_p ()
fa593d66 3993 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3994 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3995 || thread->last_resume_kind == resume_step)
229d26fc
SM
3996 && (linux_fast_tracepoint_collecting (lwp, NULL)
3997 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3998}
3999
d16f3f6c
TBA
4000void
4001linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 4002{
f0ce0d3a 4003 struct thread_info *saved_thread;
d86d4aaf 4004 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
4005 int *wstat;
4006
863d01bd
PA
4007 if (lwp->suspended != 0)
4008 {
4009 internal_error (__FILE__, __LINE__,
4010 "LWP %ld is suspended, suspended=%d\n",
4011 lwpid_of (thread), lwp->suspended);
4012 }
fa593d66
PA
4013 gdb_assert (lwp->stopped);
4014
f0ce0d3a
PA
4015 /* For gdb_breakpoint_here. */
4016 saved_thread = current_thread;
4017 current_thread = thread;
4018
fa593d66
PA
4019 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4020
4021 /* Allow debugging the jump pad, gdb_collect, etc. */
4022 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 4023 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
4024 && thread->last_resume_kind != resume_step
4025 && maybe_move_out_of_jump_pad (lwp, wstat))
4026 {
4027 if (debug_threads)
87ce2a04 4028 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 4029 lwpid_of (thread));
fa593d66
PA
4030
4031 if (wstat)
4032 {
4033 lwp->status_pending_p = 0;
4034 enqueue_one_deferred_signal (lwp, wstat);
4035
4036 if (debug_threads)
87ce2a04
DE
4037 debug_printf ("Signal %d for LWP %ld deferred "
4038 "(in jump pad)\n",
d86d4aaf 4039 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
4040 }
4041
4042 linux_resume_one_lwp (lwp, 0, 0, NULL);
4043 }
4044 else
863d01bd 4045 lwp_suspended_inc (lwp);
f0ce0d3a
PA
4046
4047 current_thread = saved_thread;
fa593d66
PA
4048}
4049
5a6b0a41
SM
4050static bool
4051lwp_running (thread_info *thread)
fa593d66 4052{
d86d4aaf 4053 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4054
00db26fa 4055 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4056 return false;
4057
4058 return !lwp->stopped;
fa593d66
PA
4059}
4060
d16f3f6c
TBA
4061void
4062linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 4063{
bde24c0a
PA
4064 /* Should not be called recursively. */
4065 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4066
87ce2a04
DE
4067 if (debug_threads)
4068 {
4069 debug_enter ();
4070 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4071 suspend ? "stop-and-suspend" : "stop",
4072 except != NULL
d86d4aaf 4073 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
4074 : "none");
4075 }
4076
bde24c0a
PA
4077 stopping_threads = (suspend
4078 ? STOPPING_AND_SUSPENDING_THREADS
4079 : STOPPING_THREADS);
7984d532
PA
4080
4081 if (suspend)
df3e4dbe
SM
4082 for_each_thread ([&] (thread_info *thread)
4083 {
4084 suspend_and_send_sigstop (thread, except);
4085 });
7984d532 4086 else
df3e4dbe
SM
4087 for_each_thread ([&] (thread_info *thread)
4088 {
4089 send_sigstop (thread, except);
4090 });
4091
fa96cb38 4092 wait_for_sigstop ();
bde24c0a 4093 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4094
4095 if (debug_threads)
4096 {
4097 debug_printf ("stop_all_lwps done, setting stopping_threads "
4098 "back to !stopping\n");
4099 debug_exit ();
4100 }
0d62e5e8
DJ
4101}
4102
863d01bd
PA
4103/* Enqueue one signal in the chain of signals which need to be
4104 delivered to this process on next resume. */
4105
4106static void
4107enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4108{
8d749320 4109 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4110
863d01bd
PA
4111 p_sig->prev = lwp->pending_signals;
4112 p_sig->signal = signal;
4113 if (info == NULL)
4114 memset (&p_sig->info, 0, sizeof (siginfo_t));
4115 else
4116 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4117 lwp->pending_signals = p_sig;
4118}
4119
fa5308bd
AT
4120/* Install breakpoints for software single stepping. */
4121
4122static void
4123install_software_single_step_breakpoints (struct lwp_info *lwp)
4124{
984a2c04
YQ
4125 struct thread_info *thread = get_lwp_thread (lwp);
4126 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4127
4128 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4129
984a2c04 4130 current_thread = thread;
a0ff9e1a 4131 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
fa5308bd 4132
a0ff9e1a 4133 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4134 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4135}
4136
7fe5e27e
AT
4137/* Single step via hardware or software single step.
4138 Return 1 if hardware single stepping, 0 if software single stepping
4139 or can't single step. */
4140
4141static int
4142single_step (struct lwp_info* lwp)
4143{
4144 int step = 0;
4145
4146 if (can_hardware_single_step ())
4147 {
4148 step = 1;
4149 }
4150 else if (can_software_single_step ())
4151 {
4152 install_software_single_step_breakpoints (lwp);
4153 step = 0;
4154 }
4155 else
4156 {
4157 if (debug_threads)
4158 debug_printf ("stepping is not implemented on this target");
4159 }
4160
4161 return step;
4162}
4163
35ac8b3e 4164/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4165 finish a fast tracepoint collect. Since signal can be delivered in
4166 the step-over, the program may go to signal handler and trap again
4167 after return from the signal handler. We can live with the spurious
4168 double traps. */
35ac8b3e
YQ
4169
4170static int
4171lwp_signal_can_be_delivered (struct lwp_info *lwp)
4172{
229d26fc
SM
4173 return (lwp->collecting_fast_tracepoint
4174 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4175}
4176
23f238d3
PA
4177/* Resume execution of LWP. If STEP is nonzero, single-step it. If
4178 SIGNAL is nonzero, give it that signal. */
da6d8c04 4179
ce3a066d 4180static void
23f238d3
PA
4181linux_resume_one_lwp_throw (struct lwp_info *lwp,
4182 int step, int signal, siginfo_t *info)
da6d8c04 4183{
d86d4aaf 4184 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4185 struct thread_info *saved_thread;
82075af2 4186 int ptrace_request;
c06cbd92
YQ
4187 struct process_info *proc = get_thread_process (thread);
4188
4189 /* Note that target description may not be initialised
4190 (proc->tdesc == NULL) at this point because the program hasn't
4191 stopped at the first instruction yet. It means GDBserver skips
4192 the extra traps from the wrapper program (see option --wrapper).
4193 Code in this function that requires register access should be
4194 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 4195
54a0b537 4196 if (lwp->stopped == 0)
0d62e5e8
DJ
4197 return;
4198
65706a29
PA
4199 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4200
229d26fc
SM
4201 fast_tpoint_collect_result fast_tp_collecting
4202 = lwp->collecting_fast_tracepoint;
fa593d66 4203
229d26fc
SM
4204 gdb_assert (!stabilizing_threads
4205 || (fast_tp_collecting
4206 != fast_tpoint_collect_result::not_collecting));
fa593d66 4207
219f2f23
PA
4208 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4209 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 4210 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
4211 {
4212 /* Collecting 'while-stepping' actions doesn't make sense
4213 anymore. */
d86d4aaf 4214 release_while_stepping_state_list (thread);
219f2f23
PA
4215 }
4216
0d62e5e8 4217 /* If we have pending signals or status, and a new signal, enqueue the
35ac8b3e
YQ
4218 signal. Also enqueue the signal if it can't be delivered to the
4219 inferior right now. */
0d62e5e8 4220 if (signal != 0
fa593d66
PA
4221 && (lwp->status_pending_p
4222 || lwp->pending_signals != NULL
35ac8b3e 4223 || !lwp_signal_can_be_delivered (lwp)))
94610ec4
YQ
4224 {
4225 enqueue_pending_signal (lwp, signal, info);
4226
4227 /* Postpone any pending signal. It was enqueued above. */
4228 signal = 0;
4229 }
0d62e5e8 4230
d50171e4
PA
4231 if (lwp->status_pending_p)
4232 {
4233 if (debug_threads)
94610ec4 4234 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
87ce2a04 4235 " has pending status\n",
94610ec4 4236 lwpid_of (thread), step ? "step" : "continue",
87ce2a04 4237 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
4238 return;
4239 }
0d62e5e8 4240
0bfdf32f
GB
4241 saved_thread = current_thread;
4242 current_thread = thread;
0d62e5e8 4243
0d62e5e8
DJ
4244 /* This bit needs some thinking about. If we get a signal that
4245 we must report while a single-step reinsert is still pending,
4246 we often end up resuming the thread. It might be better to
4247 (ew) allow a stack of pending events; then we could be sure that
4248 the reinsert happened right away and not lose any signals.
4249
4250 Making this stack would also shrink the window in which breakpoints are
54a0b537 4251 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
4252 complete correctness, so it won't solve that problem. It may be
4253 worthwhile just to solve this one, however. */
54a0b537 4254 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
4255 {
4256 if (debug_threads)
87ce2a04
DE
4257 debug_printf (" pending reinsert at 0x%s\n",
4258 paddress (lwp->bp_reinsert));
d50171e4 4259
85e00e85 4260 if (can_hardware_single_step ())
d50171e4 4261 {
229d26fc 4262 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
4263 {
4264 if (step == 0)
9986ba08 4265 warning ("BAD - reinserting but not stepping.");
fa593d66 4266 if (lwp->suspended)
9986ba08
PA
4267 warning ("BAD - reinserting and suspended(%d).",
4268 lwp->suspended);
fa593d66 4269 }
d50171e4 4270 }
f79b145d
YQ
4271
4272 step = maybe_hw_step (thread);
0d62e5e8
DJ
4273 }
4274
229d26fc 4275 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
fa593d66
PA
4276 {
4277 if (debug_threads)
87ce2a04
DE
4278 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4279 " (exit-jump-pad-bkpt)\n",
d86d4aaf 4280 lwpid_of (thread));
fa593d66 4281 }
229d26fc 4282 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
fa593d66
PA
4283 {
4284 if (debug_threads)
87ce2a04
DE
4285 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4286 " single-stepping\n",
d86d4aaf 4287 lwpid_of (thread));
fa593d66
PA
4288
4289 if (can_hardware_single_step ())
4290 step = 1;
4291 else
38e08fca
GB
4292 {
4293 internal_error (__FILE__, __LINE__,
4294 "moving out of jump pad single-stepping"
4295 " not implemented on this target");
4296 }
fa593d66
PA
4297 }
4298
219f2f23
PA
4299 /* If we have while-stepping actions in this thread set it stepping.
4300 If we have a signal to deliver, it may or may not be set to
4301 SIG_IGN, we don't know. Assume so, and allow collecting
4302 while-stepping into a signal handler. A possible smart thing to
4303 do would be to set an internal breakpoint at the signal return
4304 address, continue, and carry on catching this while-stepping
4305 action only when that breakpoint is hit. A future
4306 enhancement. */
7fe5e27e 4307 if (thread->while_stepping != NULL)
219f2f23
PA
4308 {
4309 if (debug_threads)
87ce2a04 4310 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 4311 lwpid_of (thread));
7fe5e27e
AT
4312
4313 step = single_step (lwp);
219f2f23
PA
4314 }
4315
c06cbd92 4316 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
0d62e5e8 4317 {
0bfdf32f 4318 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
4319
4320 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4321
4322 if (debug_threads)
4323 {
4324 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4325 (long) lwp->stop_pc);
4326 }
0d62e5e8
DJ
4327 }
4328
35ac8b3e
YQ
4329 /* If we have pending signals, consume one if it can be delivered to
4330 the inferior. */
4331 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
0d62e5e8
DJ
4332 {
4333 struct pending_signals **p_sig;
4334
54a0b537 4335 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
4336 while ((*p_sig)->prev != NULL)
4337 p_sig = &(*p_sig)->prev;
4338
4339 signal = (*p_sig)->signal;
32ca6d61 4340 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 4341 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4342 &(*p_sig)->info);
32ca6d61 4343
0d62e5e8
DJ
4344 free (*p_sig);
4345 *p_sig = NULL;
4346 }
4347
94610ec4
YQ
4348 if (debug_threads)
4349 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4350 lwpid_of (thread), step ? "step" : "continue", signal,
4351 lwp->stop_expected ? "expected" : "not expected");
4352
aa5ca48f
DE
4353 if (the_low_target.prepare_to_resume != NULL)
4354 the_low_target.prepare_to_resume (lwp);
4355
d86d4aaf 4356 regcache_invalidate_thread (thread);
da6d8c04 4357 errno = 0;
54a0b537 4358 lwp->stepping = step;
82075af2
JS
4359 if (step)
4360 ptrace_request = PTRACE_SINGLESTEP;
4361 else if (gdb_catching_syscalls_p (lwp))
4362 ptrace_request = PTRACE_SYSCALL;
4363 else
4364 ptrace_request = PTRACE_CONT;
4365 ptrace (ptrace_request,
4366 lwpid_of (thread),
b8e1b30e 4367 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4368 /* Coerce to a uintptr_t first to avoid potential gcc warning
4369 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4370 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4371
0bfdf32f 4372 current_thread = saved_thread;
da6d8c04 4373 if (errno)
23f238d3
PA
4374 perror_with_name ("resuming thread");
4375
4376 /* Successfully resumed. Clear state that no longer makes sense,
4377 and mark the LWP as running. Must not do this before resuming
4378 otherwise if that fails other code will be confused. E.g., we'd
4379 later try to stop the LWP and hang forever waiting for a stop
4380 status. Note that we must not throw after this is cleared,
4381 otherwise handle_zombie_lwp_error would get confused. */
4382 lwp->stopped = 0;
4383 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4384}
4385
4386/* Called when we try to resume a stopped LWP and that errors out. If
4387 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4388 or about to become), discard the error, clear any pending status
4389 the LWP may have, and return true (we'll collect the exit status
4390 soon enough). Otherwise, return false. */
4391
4392static int
4393check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4394{
4395 struct thread_info *thread = get_lwp_thread (lp);
4396
4397 /* If we get an error after resuming the LWP successfully, we'd
4398 confuse !T state for the LWP being gone. */
4399 gdb_assert (lp->stopped);
4400
4401 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4402 because even if ptrace failed with ESRCH, the tracee may be "not
4403 yet fully dead", but already refusing ptrace requests. In that
4404 case the tracee has 'R (Running)' state for a little bit
4405 (observed in Linux 3.18). See also the note on ESRCH in the
4406 ptrace(2) man page. Instead, check whether the LWP has any state
4407 other than ptrace-stopped. */
4408
4409 /* Don't assume anything if /proc/PID/status can't be read. */
4410 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4411 {
23f238d3
PA
4412 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4413 lp->status_pending_p = 0;
4414 return 1;
4415 }
4416 return 0;
4417}
4418
4419/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4420 disappears while we try to resume it. */
3221518c 4421
23f238d3
PA
4422static void
4423linux_resume_one_lwp (struct lwp_info *lwp,
4424 int step, int signal, siginfo_t *info)
4425{
a70b8144 4426 try
23f238d3
PA
4427 {
4428 linux_resume_one_lwp_throw (lwp, step, signal, info);
4429 }
230d2906 4430 catch (const gdb_exception_error &ex)
23f238d3
PA
4431 {
4432 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4433 throw;
3221518c 4434 }
da6d8c04
DJ
4435}
4436
5fdda392
SM
4437/* This function is called once per thread via for_each_thread.
4438 We look up which resume request applies to THREAD and mark it with a
4439 pointer to the appropriate resume request.
5544ad89
DJ
4440
4441 This algorithm is O(threads * resume elements), but resume elements
4442 is small (and will remain small at least until GDB supports thread
4443 suspension). */
ebcf782c 4444
5fdda392
SM
4445static void
4446linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4447{
d86d4aaf 4448 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4449
5fdda392 4450 for (int ndx = 0; ndx < n; ndx++)
95954743 4451 {
5fdda392 4452 ptid_t ptid = resume[ndx].thread;
d7e15655 4453 if (ptid == minus_one_ptid
9c80ecd6 4454 || ptid == thread->id
0c9070b3
YQ
4455 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4456 of PID'. */
e99b03dc 4457 || (ptid.pid () == pid_of (thread)
0e998d96 4458 && (ptid.is_pid ()
e38504b3 4459 || ptid.lwp () == -1)))
95954743 4460 {
5fdda392 4461 if (resume[ndx].kind == resume_stop
8336d594 4462 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4463 {
4464 if (debug_threads)
87ce2a04
DE
4465 debug_printf ("already %s LWP %ld at GDB's request\n",
4466 (thread->last_status.kind
4467 == TARGET_WAITKIND_STOPPED)
4468 ? "stopped"
4469 : "stopping",
d86d4aaf 4470 lwpid_of (thread));
d50171e4
PA
4471
4472 continue;
4473 }
4474
5a04c4cf
PA
4475 /* Ignore (wildcard) resume requests for already-resumed
4476 threads. */
5fdda392 4477 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4478 && thread->last_resume_kind != resume_stop)
4479 {
4480 if (debug_threads)
4481 debug_printf ("already %s LWP %ld at GDB's request\n",
4482 (thread->last_resume_kind
4483 == resume_step)
4484 ? "stepping"
4485 : "continuing",
4486 lwpid_of (thread));
4487 continue;
4488 }
4489
4490 /* Don't let wildcard resumes resume fork children that GDB
4491 does not yet know are new fork children. */
4492 if (lwp->fork_relative != NULL)
4493 {
5a04c4cf
PA
4494 struct lwp_info *rel = lwp->fork_relative;
4495
4496 if (rel->status_pending_p
4497 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4498 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4499 {
4500 if (debug_threads)
4501 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4502 lwpid_of (thread));
4503 continue;
4504 }
4505 }
4506
4507 /* If the thread has a pending event that has already been
4508 reported to GDBserver core, but GDB has not pulled the
4509 event out of the vStopped queue yet, likewise, ignore the
4510 (wildcard) resume request. */
9c80ecd6 4511 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4512 {
4513 if (debug_threads)
4514 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4515 lwpid_of (thread));
4516 continue;
4517 }
4518
5fdda392 4519 lwp->resume = &resume[ndx];
8336d594 4520 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4521
c2d6af84
PA
4522 lwp->step_range_start = lwp->resume->step_range_start;
4523 lwp->step_range_end = lwp->resume->step_range_end;
4524
fa593d66
PA
4525 /* If we had a deferred signal to report, dequeue one now.
4526 This can happen if LWP gets more than one signal while
4527 trying to get out of a jump pad. */
4528 if (lwp->stopped
4529 && !lwp->status_pending_p
4530 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4531 {
4532 lwp->status_pending_p = 1;
4533
4534 if (debug_threads)
87ce2a04
DE
4535 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4536 "leaving status pending.\n",
d86d4aaf
DE
4537 WSTOPSIG (lwp->status_pending),
4538 lwpid_of (thread));
fa593d66
PA
4539 }
4540
5fdda392 4541 return;
95954743
PA
4542 }
4543 }
2bd7c093
PA
4544
4545 /* No resume action for this thread. */
4546 lwp->resume = NULL;
5544ad89
DJ
4547}
4548
8f86d7aa
SM
4549/* find_thread callback for linux_resume. Return true if this lwp has an
4550 interesting status pending. */
5544ad89 4551
25c28b4d
SM
4552static bool
4553resume_status_pending_p (thread_info *thread)
5544ad89 4554{
d86d4aaf 4555 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4556
bd99dc85
PA
4557 /* LWPs which will not be resumed are not interesting, because
4558 we might not wait for them next time through linux_wait. */
2bd7c093 4559 if (lwp->resume == NULL)
25c28b4d 4560 return false;
64386c31 4561
25c28b4d 4562 return thread_still_has_status_pending_p (thread);
d50171e4
PA
4563}
4564
4565/* Return 1 if this lwp that GDB wants running is stopped at an
4566 internal breakpoint that we need to step over. It assumes that any
4567 required STOP_PC adjustment has already been propagated to the
4568 inferior's regcache. */
4569
eca55aec
SM
static bool
need_step_over_p (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  /* The client explicitly asked this thread to stay stopped, so there
     is nothing to step over.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (can_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* Breakpoint queries below are relative to the current thread, so
     temporarily switch to THREAD; restored before every return.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4700
/* Begin a step-over operation on LWP: stop all other threads, remove
   the breakpoint at LWP's PC, and single-step LWP past it.  The
   matching finish_step_over reinserts the breakpoint.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Record the PC so finish_step_over can reinsert, then lift the
     breakpoint (and any fast tracepoint jump) so the original
     instruction executes.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4747
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any single-step
   breakpoints we've set, on non hardware single-step targets.
   Returns 1 if a step-over was actually in progress and was finished,
   0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return 1;
    }
  else
    return 0;
}
4789
/* If a step-over is in progress (step_over_bkpt is set), wait for the
   stepping LWP's event, finish the step-over, and unsuspend the other
   LWPs.  All other events are left pending.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4816
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static void
linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}
4933
/* Implement the resume target op: act on the N resume requests in
   RESUME_INFO, possibly deferring everything to a step-over or to
   pending-status reporting.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Record each thread's matching resume request before acting on any
     of them.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread (resume_status_pending_p) != NULL;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over = find_thread (need_step_over_p);

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
5003
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static void
proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT (typically the LWP doing a step-over) is left alone.  */
  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (can_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  linux_resume_one_lwp (lwp, step, 0, NULL);
}
5109
e2b44075
SM
5110static void
5111unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
7984d532 5112{
d86d4aaf 5113 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5114
5115 if (lwp == except)
e2b44075 5116 return;
7984d532 5117
863d01bd 5118 lwp_suspended_decr (lwp);
7984d532 5119
e2b44075 5120 proceed_one_lwp (thread, except);
d50171e4
PA
5121}
5122
/* Resume all LWPs per their recorded resume requests, unless some
   thread first needs a step-over, in which case only that one is
   stepped.  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over = find_thread (need_step_over_p);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  for_each_thread ([] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
5157
/* Re-start all LWPs except EXCEPT.  If UNSUSPEND is nonzero, also
   decrement each LWP's suspend count first.  */

void
linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
	debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
		      lwpid_of (get_lwp_thread (except)));
      else
	debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}
5188
58caa3dc
DJ
5189
5190#ifdef HAVE_LINUX_REGSETS
5191
1faeff08
MR
5192#define use_linux_regsets 1
5193
030031ee
PA
5194/* Returns true if REGSET has been disabled. */
5195
5196static int
5197regset_disabled (struct regsets_info *info, struct regset_info *regset)
5198{
5199 return (info->disabled_regsets != NULL
5200 && info->disabled_regsets[regset - info->regsets]);
5201}
5202
5203/* Disable REGSET. */
5204
5205static void
5206disable_regset (struct regsets_info *info, struct regset_info *regset)
5207{
5208 int dr_offset;
5209
5210 dr_offset = regset - info->regsets;
5211 if (info->disabled_regsets == NULL)
224c3ddb 5212 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5213 info->disabled_regsets[dr_offset] = 1;
5214}
5215
/* Fetch every enabled regset in REGSETS_INFO from the current LWP
   into REGCACHE, one ptrace get_request per regset.  Returns 0 if a
   general-register set was among those fetched, 1 otherwise (the
   caller then falls back to per-register fetching).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* Regsets with an NT_* type use the iovec-based
	 PTRACE_GETREGSET interface.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5294
/* Write every enabled, writable regset in REGSETS_INFO from REGCACHE
   back to the current LWP.  Each regset is first read from the
   inferior so kernel-only fields are preserved, then overlaid with
   the cached registers and written back.  Returns 0 if a
   general-register set was among those stored, 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5383
1faeff08 5384#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5385
1faeff08 5386#define use_linux_regsets 0
3aee8918
PA
5387#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5388#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5389
58caa3dc 5390#endif
1faeff08
MR
5391
5392/* Return 1 if register REGNO is supported by one of the regset ptrace
5393 calls or 0 if it has to be transferred individually. */
5394
5395static int
3aee8918 5396linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5397{
5398 unsigned char mask = 1 << (regno % 8);
5399 size_t index = regno / 8;
5400
5401 return (use_linux_regsets
3aee8918
PA
5402 && (regs_info->regset_bitmap == NULL
5403 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5404}
5405
58caa3dc 5406#ifdef HAVE_LINUX_USRREGS
1faeff08 5407
5b3da067 5408static int
3aee8918 5409register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5410{
5411 int addr;
5412
3aee8918 5413 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5414 error ("Invalid register number %d.", regnum);
5415
3aee8918 5416 addr = usrregs->regmap[regnum];
1faeff08
MR
5417
5418 return addr;
5419}
5420
/* Fetch register REGNO from the inferior word-by-word with
   PTRACE_PEEKUSER and supply it to REGCACHE.  Does nothing if REGNO
   is outside the USRREGS map, the target says it cannot be fetched,
   or it has no user-area address.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5468
/* Store register REGNO from REGCACHE into the inferior word-by-word
   with PTRACE_POKEUSER.  Does nothing if REGNO is outside the USRREGS
   map, the target says it cannot be stored, or it has no user-area
   address.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5523#endif /* HAVE_LINUX_USRREGS */
1faeff08 5524
daca57a7
TBA
5525void
5526linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5527 regcache *regcache,
5528 int regno, int all)
1faeff08 5529{
daca57a7 5530#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5531 struct usrregs_info *usr = regs_info->usrregs;
5532
1faeff08
MR
5533 if (regno == -1)
5534 {
3aee8918
PA
5535 for (regno = 0; regno < usr->num_regs; regno++)
5536 if (all || !linux_register_in_regsets (regs_info, regno))
5537 fetch_register (usr, regcache, regno);
1faeff08
MR
5538 }
5539 else
3aee8918 5540 fetch_register (usr, regcache, regno);
daca57a7 5541#endif
1faeff08
MR
5542}
5543
daca57a7
TBA
5544void
5545linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5546 regcache *regcache,
5547 int regno, int all)
1faeff08 5548{
daca57a7 5549#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5550 struct usrregs_info *usr = regs_info->usrregs;
5551
1faeff08
MR
5552 if (regno == -1)
5553 {
3aee8918
PA
5554 for (regno = 0; regno < usr->num_regs; regno++)
5555 if (all || !linux_register_in_regsets (regs_info, regno))
5556 store_register (usr, regcache, regno);
1faeff08
MR
5557 }
5558 else
3aee8918 5559 store_register (usr, regcache, regno);
58caa3dc 5560#endif
daca57a7 5561}
1faeff08 5562
/* Implement the fetch_registers target op: read register REGNO (or
   all registers if REGNO is -1) into REGCACHE, preferring regsets and
   falling back to per-register PTRACE_PEEKUSER transfers.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the arch-specific hook first shot at each register.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5593
a5a4d4cd
TBA
5594void
5595linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5596{
1faeff08
MR
5597 int use_regsets;
5598 int all = 0;
aa8d21c9 5599 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5600
5601 if (regno == -1)
5602 {
3aee8918
PA
5603 all = regsets_store_inferior_registers (regs_info->regsets_info,
5604 regcache);
5605 if (regs_info->usrregs != NULL)
5606 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5607 }
5608 else
5609 {
3aee8918 5610 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5611 if (use_regsets)
3aee8918
PA
5612 all = regsets_store_inferior_registers (regs_info->regsets_info,
5613 regcache);
5614 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5615 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5616 }
58caa3dc
DJ
5617}
5618
bd70b1f2
TBA
5619bool
5620linux_process_target::low_fetch_register (regcache *regcache, int regno)
5621{
5622 return false;
5623}
da6d8c04 5624
e2558df3 5625/* A wrapper for the read_memory target op. */
da6d8c04 5626
c3e735a6 5627static int
f450004a 5628linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5629{
52405d85 5630 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5631}
5632
5633/* Copy LEN bytes from inferior's memory starting at MEMADDR
5634 to debugger memory starting at MYADDR. */
5635
5636int
5637linux_process_target::read_memory (CORE_ADDR memaddr,
5638 unsigned char *myaddr, int len)
da6d8c04 5639{
0bfdf32f 5640 int pid = lwpid_of (current_thread);
ae3e2ccf
SM
5641 PTRACE_XFER_TYPE *buffer;
5642 CORE_ADDR addr;
5643 int count;
4934b29e 5644 char filename[64];
ae3e2ccf 5645 int i;
4934b29e 5646 int ret;
fd462a61 5647 int fd;
fd462a61
DJ
5648
5649 /* Try using /proc. Don't bother for one word. */
5650 if (len >= 3 * sizeof (long))
5651 {
4934b29e
MR
5652 int bytes;
5653
fd462a61
DJ
5654 /* We could keep this file open and cache it - possibly one per
5655 thread. That requires some juggling, but is even faster. */
95954743 5656 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
5657 fd = open (filename, O_RDONLY | O_LARGEFILE);
5658 if (fd == -1)
5659 goto no_proc;
5660
5661 /* If pread64 is available, use it. It's faster if the kernel
5662 supports it (only one syscall), and it's 64-bit safe even on
5663 32-bit platforms (for instance, SPARC debugging a SPARC64
5664 application). */
5665#ifdef HAVE_PREAD64
4934b29e 5666 bytes = pread64 (fd, myaddr, len, memaddr);
fd462a61 5667#else
4934b29e
MR
5668 bytes = -1;
5669 if (lseek (fd, memaddr, SEEK_SET) != -1)
5670 bytes = read (fd, myaddr, len);
fd462a61 5671#endif
fd462a61
DJ
5672
5673 close (fd);
4934b29e
MR
5674 if (bytes == len)
5675 return 0;
5676
5677 /* Some data was read, we'll try to get the rest with ptrace. */
5678 if (bytes > 0)
5679 {
5680 memaddr += bytes;
5681 myaddr += bytes;
5682 len -= bytes;
5683 }
fd462a61 5684 }
da6d8c04 5685
fd462a61 5686 no_proc:
4934b29e
MR
5687 /* Round starting address down to longword boundary. */
5688 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5689 /* Round ending address up; get number of longwords that makes. */
5690 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5691 / sizeof (PTRACE_XFER_TYPE));
5692 /* Allocate buffer of that many longwords. */
8d749320 5693 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
4934b29e 5694
da6d8c04 5695 /* Read all the longwords */
4934b29e 5696 errno = 0;
da6d8c04
DJ
5697 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5698 {
14ce3065
DE
5699 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5700 about coercing an 8 byte integer to a 4 byte pointer. */
5701 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5702 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5703 (PTRACE_TYPE_ARG4) 0);
c3e735a6 5704 if (errno)
4934b29e 5705 break;
da6d8c04 5706 }
4934b29e 5707 ret = errno;
da6d8c04
DJ
5708
5709 /* Copy appropriate bytes out of the buffer. */
8d409d16
MR
5710 if (i > 0)
5711 {
5712 i *= sizeof (PTRACE_XFER_TYPE);
5713 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5714 memcpy (myaddr,
5715 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5716 i < len ? i : len);
5717 }
c3e735a6 5718
4934b29e 5719 return ret;
da6d8c04
DJ
5720}
5721
93ae6fdc
PA
5722/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5723 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5724 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5725
e2558df3
TBA
5726int
5727linux_process_target::write_memory (CORE_ADDR memaddr,
5728 const unsigned char *myaddr, int len)
da6d8c04 5729{
ae3e2ccf 5730 int i;
da6d8c04 5731 /* Round starting address down to longword boundary. */
ae3e2ccf 5732 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
da6d8c04 5733 /* Round ending address up; get number of longwords that makes. */
ae3e2ccf 5734 int count
493e2a69
MS
5735 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5736 / sizeof (PTRACE_XFER_TYPE);
5737
da6d8c04 5738 /* Allocate buffer of that many longwords. */
ae3e2ccf 5739 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
493e2a69 5740
0bfdf32f 5741 int pid = lwpid_of (current_thread);
da6d8c04 5742
f0ae6fc3
PA
5743 if (len == 0)
5744 {
5745 /* Zero length write always succeeds. */
5746 return 0;
5747 }
5748
0d62e5e8
DJ
5749 if (debug_threads)
5750 {
58d6951d 5751 /* Dump up to four bytes. */
bf47e248
PA
5752 char str[4 * 2 + 1];
5753 char *p = str;
5754 int dump = len < 4 ? len : 4;
5755
5756 for (i = 0; i < dump; i++)
5757 {
5758 sprintf (p, "%02x", myaddr[i]);
5759 p += 2;
5760 }
5761 *p = '\0';
5762
5763 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5764 str, (long) memaddr, pid);
0d62e5e8
DJ
5765 }
5766
da6d8c04
DJ
5767 /* Fill start and end extra bytes of buffer with existing memory data. */
5768
93ae6fdc 5769 errno = 0;
14ce3065
DE
5770 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5771 about coercing an 8 byte integer to a 4 byte pointer. */
5772 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5773 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5774 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5775 if (errno)
5776 return errno;
da6d8c04
DJ
5777
5778 if (count > 1)
5779 {
93ae6fdc 5780 errno = 0;
da6d8c04 5781 buffer[count - 1]
95954743 5782 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5783 /* Coerce to a uintptr_t first to avoid potential gcc warning
5784 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5785 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5786 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5787 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5788 if (errno)
5789 return errno;
da6d8c04
DJ
5790 }
5791
93ae6fdc 5792 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5793
493e2a69
MS
5794 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5795 myaddr, len);
da6d8c04
DJ
5796
5797 /* Write the entire buffer. */
5798
5799 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5800 {
5801 errno = 0;
14ce3065
DE
5802 ptrace (PTRACE_POKETEXT, pid,
5803 /* Coerce to a uintptr_t first to avoid potential gcc warning
5804 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5805 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5806 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5807 if (errno)
5808 return errno;
5809 }
5810
5811 return 0;
5812}
2f2893d9 5813
2a31c7aa
TBA
5814void
5815linux_process_target::look_up_symbols ()
2f2893d9 5816{
0d62e5e8 5817#ifdef USE_THREAD_DB
95954743
PA
5818 struct process_info *proc = current_process ();
5819
fe978cb0 5820 if (proc->priv->thread_db != NULL)
0d62e5e8
DJ
5821 return;
5822
9b4c5f87 5823 thread_db_init ();
0d62e5e8
DJ
5824#endif
5825}
5826
eb497a2a
TBA
5827void
5828linux_process_target::request_interrupt ()
e5379b03 5829{
78708b7c
PA
5830 /* Send a SIGINT to the process group. This acts just like the user
5831 typed a ^C on the controlling terminal. */
eb497a2a 5832 ::kill (-signal_pid, SIGINT);
e5379b03
DJ
5833}
5834
eac215cc
TBA
5835bool
5836linux_process_target::supports_read_auxv ()
5837{
5838 return true;
5839}
5840
aa691b87
RM
5841/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5842 to debugger memory starting at MYADDR. */
5843
eac215cc
TBA
5844int
5845linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5846 unsigned int len)
aa691b87
RM
5847{
5848 char filename[PATH_MAX];
5849 int fd, n;
0bfdf32f 5850 int pid = lwpid_of (current_thread);
aa691b87 5851
6cebaf6e 5852 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5853
5854 fd = open (filename, O_RDONLY);
5855 if (fd < 0)
5856 return -1;
5857
5858 if (offset != (CORE_ADDR) 0
5859 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5860 n = -1;
5861 else
5862 n = read (fd, myaddr, len);
5863
5864 close (fd);
5865
5866 return n;
5867}
5868
d993e290
PA
5869/* These breakpoint and watchpoint related wrapper functions simply
5870 pass on the function call if the target has registered a
5871 corresponding function. */
e013ee27 5872
a2b2297a
TBA
5873bool
5874linux_process_target::supports_z_point_type (char z_type)
802e8e6d
PA
5875{
5876 return (the_low_target.supports_z_point_type != NULL
5877 && the_low_target.supports_z_point_type (z_type));
5878}
5879
7e0bde70
TBA
5880int
5881linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5882 int size, raw_breakpoint *bp)
e013ee27 5883{
c8f4bfdd
YQ
5884 if (type == raw_bkpt_type_sw)
5885 return insert_memory_breakpoint (bp);
5886 else if (the_low_target.insert_point != NULL)
802e8e6d 5887 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5888 else
5889 /* Unsupported (see target.h). */
5890 return 1;
5891}
5892
7e0bde70
TBA
5893int
5894linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5895 int size, raw_breakpoint *bp)
e013ee27 5896{
c8f4bfdd
YQ
5897 if (type == raw_bkpt_type_sw)
5898 return remove_memory_breakpoint (bp);
5899 else if (the_low_target.remove_point != NULL)
802e8e6d 5900 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5901 else
5902 /* Unsupported (see target.h). */
5903 return 1;
5904}
5905
84320c4e 5906/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5907 method. */
5908
84320c4e
TBA
5909bool
5910linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5911{
5912 struct lwp_info *lwp = get_thread_lwp (current_thread);
5913
5914 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5915}
5916
84320c4e 5917/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5918 method. */
5919
84320c4e
TBA
5920bool
5921linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5922{
5923 return USE_SIGTRAP_SIGINFO;
5924}
5925
93fe88b2 5926/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5927 method. */
5928
93fe88b2
TBA
5929bool
5930linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5931{
5932 struct lwp_info *lwp = get_thread_lwp (current_thread);
5933
5934 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5935}
5936
93fe88b2 5937/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5938 method. */
5939
93fe88b2
TBA
5940bool
5941linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5942{
5943 return USE_SIGTRAP_SIGINFO;
5944}
5945
70b90b91 5946/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5947
22aa6223
TBA
5948bool
5949linux_process_target::supports_hardware_single_step ()
45614f15 5950{
45614f15
YQ
5951 return can_hardware_single_step ();
5952}
5953
5303a34f
TBA
5954bool
5955linux_process_target::supports_software_single_step ()
7d00775e
AT
5956{
5957 return can_software_single_step ();
5958}
5959
6eeb5c55
TBA
5960bool
5961linux_process_target::stopped_by_watchpoint ()
e013ee27 5962{
0bfdf32f 5963 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5964
15c66dd6 5965 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5966}
5967
6eeb5c55
TBA
5968CORE_ADDR
5969linux_process_target::stopped_data_address ()
e013ee27 5970{
0bfdf32f 5971 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5972
5973 return lwp->stopped_data_address;
e013ee27
OF
5974}
5975
db0dfaa0
LM
5976/* This is only used for targets that define PT_TEXT_ADDR,
5977 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5978 the target has different ways of acquiring this information, like
5979 loadmaps. */
52fb6437 5980
5203ae1e
TBA
5981bool
5982linux_process_target::supports_read_offsets ()
5983{
5984#ifdef SUPPORTS_READ_OFFSETS
5985 return true;
5986#else
5987 return false;
5988#endif
5989}
5990
52fb6437
NS
5991/* Under uClinux, programs are loaded at non-zero offsets, which we need
5992 to tell gdb about. */
5993
5203ae1e
TBA
5994int
5995linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
52fb6437 5996{
5203ae1e 5997#ifdef SUPPORTS_READ_OFFSETS
52fb6437 5998 unsigned long text, text_end, data;
62828379 5999 int pid = lwpid_of (current_thread);
52fb6437
NS
6000
6001 errno = 0;
6002
b8e1b30e
LM
6003 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6004 (PTRACE_TYPE_ARG4) 0);
6005 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6006 (PTRACE_TYPE_ARG4) 0);
6007 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6008 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
6009
6010 if (errno == 0)
6011 {
6012 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
6013 used by gdb) are relative to the beginning of the program,
6014 with the data segment immediately following the text segment.
6015 However, the actual runtime layout in memory may put the data
6016 somewhere else, so when we send gdb a data base-address, we
6017 use the real data base address and subtract the compile-time
6018 data base-address from it (which is just the length of the
6019 text segment). BSS immediately follows data in both
6020 cases. */
52fb6437
NS
6021 *text_p = text;
6022 *data_p = data - (text_end - text);
1b3f6016 6023
52fb6437
NS
6024 return 1;
6025 }
5203ae1e
TBA
6026 return 0;
6027#else
6028 gdb_assert_not_reached ("target op read_offsets not supported");
52fb6437 6029#endif
5203ae1e 6030}
52fb6437 6031
6e3fd7e9
TBA
6032bool
6033linux_process_target::supports_get_tls_address ()
6034{
6035#ifdef USE_THREAD_DB
6036 return true;
6037#else
6038 return false;
6039#endif
6040}
6041
6042int
6043linux_process_target::get_tls_address (thread_info *thread,
6044 CORE_ADDR offset,
6045 CORE_ADDR load_module,
6046 CORE_ADDR *address)
6047{
6048#ifdef USE_THREAD_DB
6049 return thread_db_get_tls_address (thread, offset, load_module, address);
6050#else
6051 return -1;
6052#endif
6053}
6054
2d0795ee
TBA
6055bool
6056linux_process_target::supports_qxfer_osdata ()
6057{
6058 return true;
6059}
6060
6061int
6062linux_process_target::qxfer_osdata (const char *annex,
6063 unsigned char *readbuf,
6064 unsigned const char *writebuf,
6065 CORE_ADDR offset, int len)
07e059b5 6066{
d26e3629 6067 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
6068}
6069
d0722149
DE
6070/* Convert a native/host siginfo object, into/from the siginfo in the
6071 layout of the inferiors' architecture. */
6072
6073static void
8adce034 6074siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
d0722149
DE
6075{
6076 int done = 0;
6077
6078 if (the_low_target.siginfo_fixup != NULL)
6079 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6080
6081 /* If there was no callback, or the callback didn't do anything,
6082 then just do a straight memcpy. */
6083 if (!done)
6084 {
6085 if (direction == 1)
a5362b9a 6086 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6087 else
a5362b9a 6088 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6089 }
6090}
6091
d7abedf7
TBA
6092bool
6093linux_process_target::supports_qxfer_siginfo ()
6094{
6095 return true;
6096}
6097
6098int
6099linux_process_target::qxfer_siginfo (const char *annex,
6100 unsigned char *readbuf,
6101 unsigned const char *writebuf,
6102 CORE_ADDR offset, int len)
4aa995e1 6103{
d0722149 6104 int pid;
a5362b9a 6105 siginfo_t siginfo;
8adce034 6106 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 6107
0bfdf32f 6108 if (current_thread == NULL)
4aa995e1
PA
6109 return -1;
6110
0bfdf32f 6111 pid = lwpid_of (current_thread);
4aa995e1
PA
6112
6113 if (debug_threads)
87ce2a04
DE
6114 debug_printf ("%s siginfo for lwp %d.\n",
6115 readbuf != NULL ? "Reading" : "Writing",
6116 pid);
4aa995e1 6117
0adea5f7 6118 if (offset >= sizeof (siginfo))
4aa995e1
PA
6119 return -1;
6120
b8e1b30e 6121 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
6122 return -1;
6123
d0722149
DE
6124 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6125 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6126 inferior with a 64-bit GDBSERVER should look the same as debugging it
6127 with a 32-bit GDBSERVER, we need to convert it. */
6128 siginfo_fixup (&siginfo, inf_siginfo, 0);
6129
4aa995e1
PA
6130 if (offset + len > sizeof (siginfo))
6131 len = sizeof (siginfo) - offset;
6132
6133 if (readbuf != NULL)
d0722149 6134 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
6135 else
6136 {
d0722149
DE
6137 memcpy (inf_siginfo + offset, writebuf, len);
6138
6139 /* Convert back to ptrace layout before flushing it out. */
6140 siginfo_fixup (&siginfo, inf_siginfo, 1);
6141
b8e1b30e 6142 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
6143 return -1;
6144 }
6145
6146 return len;
6147}
6148
bd99dc85
PA
6149/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6150 so we notice when children change state; as the handler for the
6151 sigsuspend in my_waitpid. */
6152
6153static void
6154sigchld_handler (int signo)
6155{
6156 int old_errno = errno;
6157
6158 if (debug_threads)
e581f2b4
PA
6159 {
6160 do
6161 {
a7e559cc
AH
6162 /* Use the async signal safe debug function. */
6163 if (debug_write ("sigchld_handler\n",
6164 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
6165 break; /* just ignore */
6166 } while (0);
6167 }
bd99dc85
PA
6168
6169 if (target_is_async_p ())
6170 async_file_mark (); /* trigger a linux_wait */
6171
6172 errno = old_errno;
6173}
6174
0dc587d4
TBA
6175bool
6176linux_process_target::supports_non_stop ()
bd99dc85 6177{
0dc587d4 6178 return true;
bd99dc85
PA
6179}
6180
0dc587d4
TBA
6181bool
6182linux_process_target::async (bool enable)
bd99dc85 6183{
0dc587d4 6184 bool previous = target_is_async_p ();
bd99dc85 6185
8336d594 6186 if (debug_threads)
87ce2a04
DE
6187 debug_printf ("linux_async (%d), previous=%d\n",
6188 enable, previous);
8336d594 6189
bd99dc85
PA
6190 if (previous != enable)
6191 {
6192 sigset_t mask;
6193 sigemptyset (&mask);
6194 sigaddset (&mask, SIGCHLD);
6195
21987b9c 6196 gdb_sigmask (SIG_BLOCK, &mask, NULL);
bd99dc85
PA
6197
6198 if (enable)
6199 {
6200 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
6201 {
6202 linux_event_pipe[0] = -1;
6203 linux_event_pipe[1] = -1;
21987b9c 6204 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
aa96c426
GB
6205
6206 warning ("creating event pipe failed.");
6207 return previous;
6208 }
bd99dc85
PA
6209
6210 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6211 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6212
6213 /* Register the event loop handler. */
6214 add_file_handler (linux_event_pipe[0],
6215 handle_target_event, NULL);
6216
6217 /* Always trigger a linux_wait. */
6218 async_file_mark ();
6219 }
6220 else
6221 {
6222 delete_file_handler (linux_event_pipe[0]);
6223
6224 close (linux_event_pipe[0]);
6225 close (linux_event_pipe[1]);
6226 linux_event_pipe[0] = -1;
6227 linux_event_pipe[1] = -1;
6228 }
6229
21987b9c 6230 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
bd99dc85
PA
6231 }
6232
6233 return previous;
6234}
6235
0dc587d4
TBA
6236int
6237linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
6238{
6239 /* Register or unregister from event-loop accordingly. */
0dc587d4 6240 target_async (nonstop);
aa96c426 6241
0dc587d4 6242 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
6243 return -1;
6244
bd99dc85
PA
6245 return 0;
6246}
6247
652aef77
TBA
6248bool
6249linux_process_target::supports_multi_process ()
cf8fd78b 6250{
652aef77 6251 return true;
cf8fd78b
PA
6252}
6253
89245bc0
DB
6254/* Check if fork events are supported. */
6255
9690a72a
TBA
6256bool
6257linux_process_target::supports_fork_events ()
89245bc0
DB
6258{
6259 return linux_supports_tracefork ();
6260}
6261
6262/* Check if vfork events are supported. */
6263
9690a72a
TBA
6264bool
6265linux_process_target::supports_vfork_events ()
89245bc0
DB
6266{
6267 return linux_supports_tracefork ();
6268}
6269
94585166
DB
6270/* Check if exec events are supported. */
6271
9690a72a
TBA
6272bool
6273linux_process_target::supports_exec_events ()
94585166
DB
6274{
6275 return linux_supports_traceexec ();
6276}
6277
de0d863e
DB
6278/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6279 ptrace flags for all inferiors. This is in case the new GDB connection
6280 doesn't support the same set of events that the previous one did. */
6281
fb00dfce
TBA
6282void
6283linux_process_target::handle_new_gdb_connection ()
de0d863e 6284{
de0d863e 6285 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
6286 for_each_thread ([] (thread_info *thread)
6287 {
6288 struct lwp_info *lwp = get_thread_lwp (thread);
6289
6290 if (!lwp->stopped)
6291 {
6292 /* Stop the lwp so we can modify its ptrace options. */
6293 lwp->must_set_ptrace_flags = 1;
6294 linux_stop_lwp (lwp);
6295 }
6296 else
6297 {
6298 /* Already stopped; go ahead and set the ptrace options. */
6299 struct process_info *proc = find_process_pid (pid_of (thread));
6300 int options = linux_low_ptrace_options (proc->attached);
6301
6302 linux_enable_event_reporting (lwpid_of (thread), options);
6303 lwp->must_set_ptrace_flags = 0;
6304 }
6305 });
de0d863e
DB
6306}
6307
55cf3021
TBA
6308int
6309linux_process_target::handle_monitor_command (char *mon)
6310{
6311#ifdef USE_THREAD_DB
6312 return thread_db_handle_monitor_command (mon);
6313#else
6314 return 0;
6315#endif
6316}
6317
95a45fc1
TBA
6318int
6319linux_process_target::core_of_thread (ptid_t ptid)
6320{
6321 return linux_common_core_of_thread (ptid);
6322}
6323
c756403b
TBA
6324bool
6325linux_process_target::supports_disable_randomization ()
03583c20
UW
6326{
6327#ifdef HAVE_PERSONALITY
c756403b 6328 return true;
03583c20 6329#else
c756403b 6330 return false;
03583c20
UW
6331#endif
6332}
efcbbd14 6333
c0245cb9
TBA
6334bool
6335linux_process_target::supports_agent ()
d1feda86 6336{
c0245cb9 6337 return true;
d1feda86
YQ
6338}
6339
2526e0cd
TBA
6340bool
6341linux_process_target::supports_range_stepping ()
c2d6af84 6342{
c3805894 6343 if (can_software_single_step ())
2526e0cd 6344 return true;
c2d6af84 6345 if (*the_low_target.supports_range_stepping == NULL)
2526e0cd 6346 return false;
c2d6af84
PA
6347
6348 return (*the_low_target.supports_range_stepping) ();
6349}
6350
8247b823
TBA
6351bool
6352linux_process_target::supports_pid_to_exec_file ()
6353{
6354 return true;
6355}
6356
6357char *
6358linux_process_target::pid_to_exec_file (int pid)
6359{
6360 return linux_proc_pid_to_exec_file (pid);
6361}
6362
c9b7b804
TBA
6363bool
6364linux_process_target::supports_multifs ()
6365{
6366 return true;
6367}
6368
6369int
6370linux_process_target::multifs_open (int pid, const char *filename,
6371 int flags, mode_t mode)
6372{
6373 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6374}
6375
6376int
6377linux_process_target::multifs_unlink (int pid, const char *filename)
6378{
6379 return linux_mntns_unlink (pid, filename);
6380}
6381
6382ssize_t
6383linux_process_target::multifs_readlink (int pid, const char *filename,
6384 char *buf, size_t bufsiz)
6385{
6386 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6387}
6388
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a target load map, as reported by the kernel's
   PT_GETDSBT / PTRACE_GETFDPIC requests.  The layout must match the
   kernel's; do not reorder or resize the members.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
/* Load map layout used with the DSBT variant of the interface.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
/* Load map layout used with the FDPIC variant of the interface.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 6430
9da41fda
TBA
6431bool
6432linux_process_target::supports_read_loadmap ()
6433{
6434 return true;
6435}
6436
6437int
6438linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6439 unsigned char *myaddr, unsigned int len)
78d85199 6440{
0bfdf32f 6441 int pid = lwpid_of (current_thread);
78d85199
YQ
6442 int addr = -1;
6443 struct target_loadmap *data = NULL;
6444 unsigned int actual_length, copy_length;
6445
6446 if (strcmp (annex, "exec") == 0)
723b724b 6447 addr = (int) LINUX_LOADMAP_EXEC;
78d85199 6448 else if (strcmp (annex, "interp") == 0)
723b724b 6449 addr = (int) LINUX_LOADMAP_INTERP;
78d85199
YQ
6450 else
6451 return -1;
6452
723b724b 6453 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
78d85199
YQ
6454 return -1;
6455
6456 if (data == NULL)
6457 return -1;
6458
6459 actual_length = sizeof (struct target_loadmap)
6460 + sizeof (struct target_loadseg) * data->nsegs;
6461
6462 if (offset < 0 || offset > actual_length)
6463 return -1;
6464
6465 copy_length = actual_length - offset < len ? actual_length - offset : len;
6466 memcpy (myaddr, (char *) data + offset, copy_length);
6467 return copy_length;
6468}
723b724b 6469#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6470
0df28b1b
TBA
6471void
6472linux_process_target::process_qsupported (char **features, int count)
1570b33e
L
6473{
6474 if (the_low_target.process_qsupported != NULL)
06e03fff 6475 the_low_target.process_qsupported (features, count);
1570b33e
L
6476}
6477
bc8d3ae4
TBA
6478bool
6479linux_process_target::supports_catch_syscall ()
82075af2
JS
6480{
6481 return (the_low_target.get_syscall_trapinfo != NULL
6482 && linux_supports_tracesysgood ());
6483}
6484
d633e831
TBA
6485int
6486linux_process_target::get_ipa_tdesc_idx ()
ae91f625
MK
6487{
6488 if (the_low_target.get_ipa_tdesc_idx == NULL)
6489 return 0;
6490
6491 return (*the_low_target.get_ipa_tdesc_idx) ();
6492}
6493
290732bf
TBA
6494bool
6495linux_process_target::supports_tracepoints ()
219f2f23
PA
6496{
6497 if (*the_low_target.supports_tracepoints == NULL)
290732bf 6498 return false;
219f2f23
PA
6499
6500 return (*the_low_target.supports_tracepoints) ();
6501}
6502
770d8f6a
TBA
6503CORE_ADDR
6504linux_process_target::read_pc (regcache *regcache)
219f2f23
PA
6505{
6506 if (the_low_target.get_pc == NULL)
6507 return 0;
6508
6509 return (*the_low_target.get_pc) (regcache);
6510}
6511
770d8f6a
TBA
6512void
6513linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23
PA
6514{
6515 gdb_assert (the_low_target.set_pc != NULL);
6516
6517 (*the_low_target.set_pc) (regcache, pc);
6518}
6519
68119632
TBA
6520bool
6521linux_process_target::supports_thread_stopped ()
6522{
6523 return true;
6524}
6525
6526bool
6527linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6528{
6529 return get_thread_lwp (thread)->stopped;
6530}
6531
6532/* This exposes stop-all-threads functionality to other modules. */
6533
29e8dc09
TBA
6534void
6535linux_process_target::pause_all (bool freeze)
8336d594 6536{
7984d532
PA
6537 stop_all_lwps (freeze, NULL);
6538}
6539
6540/* This exposes unstop-all-threads functionality to other gdbserver
6541 modules. */
6542
29e8dc09
TBA
6543void
6544linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6545{
6546 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6547}
6548
79b44087
TBA
6549int
6550linux_process_target::prepare_to_access_memory ()
90d74c30
PA
6551{
6552 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6553 running LWP. */
6554 if (non_stop)
29e8dc09 6555 target_pause_all (true);
90d74c30
PA
6556 return 0;
6557}
6558
79b44087
TBA
6559void
6560linux_process_target::done_accessing_memory ()
90d74c30
PA
6561{
6562 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6563 running LWP. */
6564 if (non_stop)
29e8dc09 6565 target_unpause_all (true);
90d74c30
PA
6566}
6567
c23c9391
TBA
6568bool
6569linux_process_target::supports_fast_tracepoints ()
6570{
6571 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6572}
6573
6574int
6575linux_process_target::install_fast_tracepoint_jump_pad
6576 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6577 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6578 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6579 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6580 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6581 char *err)
fa593d66
PA
6582{
6583 return (*the_low_target.install_fast_tracepoint_jump_pad)
6584 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
6585 jump_entry, trampoline, trampoline_size,
6586 jjump_pad_insn, jjump_pad_insn_size,
6587 adjusted_insn_addr, adjusted_insn_addr_end,
6588 err);
fa593d66
PA
6589}
6590
345dafad
TBA
6591emit_ops *
6592linux_process_target::emit_ops ()
6a271cae
PA
6593{
6594 if (the_low_target.emit_ops != NULL)
6595 return (*the_low_target.emit_ops) ();
6596 else
6597 return NULL;
6598}
6599
c23c9391
TBA
6600int
6601linux_process_target::get_min_fast_tracepoint_insn_len ()
405f8e94
SS
6602{
6603 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6604}
6605
2268b414
JK
6606/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6607
6608static int
6609get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6610 CORE_ADDR *phdr_memaddr, int *num_phdr)
6611{
6612 char filename[PATH_MAX];
6613 int fd;
6614 const int auxv_size = is_elf64
6615 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6616 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6617
6618 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6619
6620 fd = open (filename, O_RDONLY);
6621 if (fd < 0)
6622 return 1;
6623
6624 *phdr_memaddr = 0;
6625 *num_phdr = 0;
6626 while (read (fd, buf, auxv_size) == auxv_size
6627 && (*phdr_memaddr == 0 || *num_phdr == 0))
6628 {
6629 if (is_elf64)
6630 {
6631 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6632
6633 switch (aux->a_type)
6634 {
6635 case AT_PHDR:
6636 *phdr_memaddr = aux->a_un.a_val;
6637 break;
6638 case AT_PHNUM:
6639 *num_phdr = aux->a_un.a_val;
6640 break;
6641 }
6642 }
6643 else
6644 {
6645 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6646
6647 switch (aux->a_type)
6648 {
6649 case AT_PHDR:
6650 *phdr_memaddr = aux->a_un.a_val;
6651 break;
6652 case AT_PHNUM:
6653 *num_phdr = aux->a_un.a_val;
6654 break;
6655 }
6656 }
6657 }
6658
6659 close (fd);
6660
6661 if (*phdr_memaddr == 0 || *num_phdr == 0)
6662 {
6663 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6664 "phdr_memaddr = %ld, phdr_num = %d",
6665 (long) *phdr_memaddr, *num_phdr);
6666 return 2;
6667 }
6668
6669 return 0;
6670}
6671
6672/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6673
6674static CORE_ADDR
6675get_dynamic (const int pid, const int is_elf64)
6676{
6677 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6678 int num_phdr, i;
2268b414 6679 unsigned char *phdr_buf;
db1ff28b 6680 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6681
6682 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6683 return 0;
6684
6685 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6686 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6687
6688 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6689 return 0;
6690
6691 /* Compute relocation: it is expected to be 0 for "regular" executables,
6692 non-zero for PIE ones. */
6693 relocation = -1;
db1ff28b
JK
6694 for (i = 0; relocation == -1 && i < num_phdr; i++)
6695 if (is_elf64)
6696 {
6697 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6698
6699 if (p->p_type == PT_PHDR)
6700 relocation = phdr_memaddr - p->p_vaddr;
6701 }
6702 else
6703 {
6704 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6705
6706 if (p->p_type == PT_PHDR)
6707 relocation = phdr_memaddr - p->p_vaddr;
6708 }
6709
2268b414
JK
6710 if (relocation == -1)
6711 {
e237a7e2
JK
6712 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6713 any real world executables, including PIE executables, have always
6714 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6715 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6716 or present DT_DEBUG anyway (fpc binaries are statically linked).
6717
6718 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6719
6720 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6721
2268b414
JK
6722 return 0;
6723 }
6724
db1ff28b
JK
6725 for (i = 0; i < num_phdr; i++)
6726 {
6727 if (is_elf64)
6728 {
6729 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6730
6731 if (p->p_type == PT_DYNAMIC)
6732 return p->p_vaddr + relocation;
6733 }
6734 else
6735 {
6736 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6737
db1ff28b
JK
6738 if (p->p_type == PT_DYNAMIC)
6739 return p->p_vaddr + relocation;
6740 }
6741 }
2268b414
JK
6742
6743 return 0;
6744}
6745
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.

   The dynamic section is walked entry by entry via linux_read_memory;
   the loop ends at DT_NULL, on a read failure, or as soon as a MIPS
   rld_map value is successfully fetched.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Read the pointed-to rld_map value byte-wise through a
	     union, since it lives in inferior memory.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the absolute address of the
	     rld_map pointer.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic section's
	     own address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember the first DT_DEBUG value but keep scanning, in
	     case a MIPS rld_map entry appears later.  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6847
6848/* Read one pointer from MEMADDR in the inferior. */
6849
6850static int
6851read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6852{
485f1ee4
PA
6853 int ret;
6854
6855 /* Go through a union so this works on either big or little endian
6856 hosts, when the inferior's pointer size is smaller than the size
6857 of CORE_ADDR. It is assumed the inferior's endianness is the
6858 same of the superior's. */
6859 union
6860 {
6861 CORE_ADDR core_addr;
6862 unsigned int ui;
6863 unsigned char uc;
6864 } addr;
6865
6866 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6867 if (ret == 0)
6868 {
6869 if (ptr_size == sizeof (CORE_ADDR))
6870 *ptr = addr.core_addr;
6871 else if (ptr_size == sizeof (unsigned int))
6872 *ptr = addr.ui;
6873 else
6874 gdb_assert_not_reached ("unhandled pointer size");
6875 }
6876 return ret;
2268b414
JK
6877}
6878
974387bb
TBA
/* qXfer:libraries-svr4:read is always supported by this target; the
   reply is built by qxfer_libraries_svr4 below.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6884
2268b414
JK
/* Byte offsets of the fields of the SVR4 dynamic linker's `struct
   r_debug' and `struct link_map' that qxfer_libraries_svr4 needs, for
   one inferior word size (32- or 64-bit).  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6908
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX may carry "start=ADDR" and/or "prev=ADDR" arguments (each
   terminated by a semicolon) selecting an incremental fetch from a
   given link_map entry; with no "start", the walk begins at
   r_debug.r_map.  Returns the number of bytes copied into READBUF,
   -2 if a write was requested (unsupported), or -1 on error.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit offsets based on the inferior executable's
     ELF class.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=value;" pairs out of the annex; unrecognized names
     are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      /* Locate the inferior's r_debug lazily and cache it in the
	 per-process private data.  */
      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link_map chain, emitting one <library> element per
     entry.  Stop on any read failure or list corruption.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Serve the window [OFFSET, OFFSET + LEN) of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
7098
9accd112
MM
7099#ifdef HAVE_LINUX_BTRACE
7100
79597bdd
TBA
/* See to_enable_btrace target method.  Delegates directly to the
   native linux btrace support.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}
7107
969c39fb 7108/* See to_disable_btrace target method. */
9accd112 7109
79597bdd
TBA
7110int
7111linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
7112{
7113 enum btrace_error err;
7114
7115 err = linux_disable_btrace (tinfo);
7116 return (err == BTRACE_ERR_NONE ? 0 : -1);
7117}
7118
bc504a31 7119/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7120
7121static void
7122linux_low_encode_pt_config (struct buffer *buffer,
7123 const struct btrace_data_pt_config *config)
7124{
7125 buffer_grow_str (buffer, "<pt-config>\n");
7126
7127 switch (config->cpu.vendor)
7128 {
7129 case CV_INTEL:
7130 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7131 "model=\"%u\" stepping=\"%u\"/>\n",
7132 config->cpu.family, config->cpu.model,
7133 config->cpu.stepping);
7134 break;
7135
7136 default:
7137 break;
7138 }
7139
7140 buffer_grow_str (buffer, "</pt-config>\n");
7141}
7142
7143/* Encode a raw buffer. */
7144
7145static void
7146linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7147 unsigned int size)
7148{
7149 if (size == 0)
7150 return;
7151
268a13a5 7152 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
7153 buffer_grow_str (buffer, "<raw>\n");
7154
7155 while (size-- > 0)
7156 {
7157 char elem[2];
7158
7159 elem[0] = tohex ((*data >> 4) & 0xf);
7160 elem[1] = tohex (*data++ & 0xf);
7161
7162 buffer_grow (buffer, elem, 2);
7163 }
7164
7165 buffer_grow_str (buffer, "</raw>\n");
7166}
7167
969c39fb
MM
7168/* See to_read_btrace target method. */
7169
79597bdd
TBA
7170int
7171linux_process_target::read_btrace (btrace_target_info *tinfo,
7172 buffer *buffer,
7173 enum btrace_read_type type)
9accd112 7174{
734b0e4b 7175 struct btrace_data btrace;
969c39fb 7176 enum btrace_error err;
9accd112 7177
969c39fb
MM
7178 err = linux_read_btrace (&btrace, tinfo, type);
7179 if (err != BTRACE_ERR_NONE)
7180 {
7181 if (err == BTRACE_ERR_OVERFLOW)
7182 buffer_grow_str0 (buffer, "E.Overflow.");
7183 else
7184 buffer_grow_str0 (buffer, "E.Generic Error.");
7185
8dcc53b3 7186 return -1;
969c39fb 7187 }
9accd112 7188
734b0e4b
MM
7189 switch (btrace.format)
7190 {
7191 case BTRACE_FORMAT_NONE:
7192 buffer_grow_str0 (buffer, "E.No Trace.");
8dcc53b3 7193 return -1;
734b0e4b
MM
7194
7195 case BTRACE_FORMAT_BTS:
7196 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7197 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
9accd112 7198
46f29a9a 7199 for (const btrace_block &block : *btrace.variant.bts.blocks)
734b0e4b 7200 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
46f29a9a 7201 paddress (block.begin), paddress (block.end));
9accd112 7202
734b0e4b
MM
7203 buffer_grow_str0 (buffer, "</btrace>\n");
7204 break;
7205
b20a6524
MM
7206 case BTRACE_FORMAT_PT:
7207 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7208 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7209 buffer_grow_str (buffer, "<pt>\n");
7210
7211 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 7212
b20a6524
MM
7213 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7214 btrace.variant.pt.size);
7215
7216 buffer_grow_str (buffer, "</pt>\n");
7217 buffer_grow_str0 (buffer, "</btrace>\n");
7218 break;
7219
7220 default:
7221 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
8dcc53b3 7222 return -1;
734b0e4b 7223 }
969c39fb
MM
7224
7225 return 0;
9accd112 7226}
f4abbc16
MM
7227
7228/* See to_btrace_conf target method. */
7229
79597bdd
TBA
7230int
7231linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7232 buffer *buffer)
f4abbc16
MM
7233{
7234 const struct btrace_config *conf;
7235
7236 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7237 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7238
7239 conf = linux_btrace_conf (tinfo);
7240 if (conf != NULL)
7241 {
7242 switch (conf->format)
7243 {
7244 case BTRACE_FORMAT_NONE:
7245 break;
7246
7247 case BTRACE_FORMAT_BTS:
d33501a5
MM
7248 buffer_xml_printf (buffer, "<bts");
7249 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7250 buffer_xml_printf (buffer, " />\n");
f4abbc16 7251 break;
b20a6524
MM
7252
7253 case BTRACE_FORMAT_PT:
7254 buffer_xml_printf (buffer, "<pt");
7255 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7256 buffer_xml_printf (buffer, "/>\n");
7257 break;
f4abbc16
MM
7258 }
7259 }
7260
7261 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7262 return 0;
7263}
9accd112
MM
7264#endif /* HAVE_LINUX_BTRACE */
7265
7b669087
GB
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
7273
dd373349
AT
7274/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7275
d367006f
TBA
7276int
7277linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
dd373349
AT
7278{
7279 if (the_low_target.breakpoint_kind_from_pc != NULL)
7280 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7281 else
52405d85 7282 return process_stratum_target::breakpoint_kind_from_pc (pcptr);
dd373349
AT
7283}
7284
7285/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7286
d367006f
TBA
7287const gdb_byte *
7288linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
dd373349
AT
7289{
7290 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7291
7292 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7293}
7294
769ef81f
AT
7295/* Implementation of the target_ops method
7296 "breakpoint_kind_from_current_state". */
7297
d367006f
TBA
7298int
7299linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
769ef81f
AT
7300{
7301 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7302 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7303 else
d367006f 7304 return breakpoint_kind_from_pc (pcptr);
769ef81f
AT
7305}
7306
7f63b89b
TBA
/* See target_ops::thread_name.  Delegates to linux_proc_tid_get_name
   (presumably /proc-based; see nat/linux-procfs).  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
7312
#if USE_THREAD_DB
/* See target_ops::thread_handle.  Only available when gdbserver is
   built with libthread_db support.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7321
276d4552
YQ
7322/* Default implementation of linux_target_ops method "set_pc" for
7323 32-bit pc register which is literally named "pc". */
7324
7325void
7326linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7327{
7328 uint32_t newpc = pc;
7329
7330 supply_register_by_name (regcache, "pc", &newpc);
7331}
7332
7333/* Default implementation of linux_target_ops method "get_pc" for
7334 32-bit pc register which is literally named "pc". */
7335
7336CORE_ADDR
7337linux_get_pc_32bit (struct regcache *regcache)
7338{
7339 uint32_t pc;
7340
7341 collect_register_by_name (regcache, "pc", &pc);
7342 if (debug_threads)
7343 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7344 return pc;
7345}
7346
6f69e520
YQ
7347/* Default implementation of linux_target_ops method "set_pc" for
7348 64-bit pc register which is literally named "pc". */
7349
7350void
7351linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7352{
7353 uint64_t newpc = pc;
7354
7355 supply_register_by_name (regcache, "pc", &newpc);
7356}
7357
7358/* Default implementation of linux_target_ops method "get_pc" for
7359 64-bit pc register which is literally named "pc". */
7360
7361CORE_ADDR
7362linux_get_pc_64bit (struct regcache *regcache)
7363{
7364 uint64_t pc;
7365
7366 collect_register_by_name (regcache, "pc", &pc);
7367 if (debug_threads)
7368 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7369 return pc;
7370}
7371
0570503d 7372/* See linux-low.h. */
974c89e0 7373
0570503d
PFC
7374int
7375linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7376{
7377 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7378 int offset = 0;
7379
7380 gdb_assert (wordsize == 4 || wordsize == 8);
7381
52405d85 7382 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7383 {
7384 if (wordsize == 4)
7385 {
0570503d 7386 uint32_t *data_p = (uint32_t *) data;
974c89e0 7387 if (data_p[0] == match)
0570503d
PFC
7388 {
7389 *valp = data_p[1];
7390 return 1;
7391 }
974c89e0
AH
7392 }
7393 else
7394 {
0570503d 7395 uint64_t *data_p = (uint64_t *) data;
974c89e0 7396 if (data_p[0] == match)
0570503d
PFC
7397 {
7398 *valp = data_p[1];
7399 return 1;
7400 }
974c89e0
AH
7401 }
7402
7403 offset += 2 * wordsize;
7404 }
7405
7406 return 0;
7407}
7408
7409/* See linux-low.h. */
7410
7411CORE_ADDR
7412linux_get_hwcap (int wordsize)
7413{
0570503d
PFC
7414 CORE_ADDR hwcap = 0;
7415 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7416 return hwcap;
974c89e0
AH
7417}
7418
7419/* See linux-low.h. */
7420
7421CORE_ADDR
7422linux_get_hwcap2 (int wordsize)
7423{
0570503d
PFC
7424 CORE_ADDR hwcap2 = 0;
7425 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7426 return hwcap2;
974c89e0 7427}
6f69e520 7428
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries in INFO's regset table and record the count in
   INFO->num_regsets.  The table is terminated by an entry whose size
   is negative.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7439
da6d8c04
DJ
7440void
7441initialize_low (void)
7442{
bd99dc85 7443 struct sigaction sigchld_action;
dd373349 7444
bd99dc85 7445 memset (&sigchld_action, 0, sizeof (sigchld_action));
ef0478f6 7446 set_target_ops (the_linux_target);
dd373349 7447
aa7c7447 7448 linux_ptrace_init_warnings ();
1b919490 7449 linux_proc_init_warnings ();
bd99dc85
PA
7450
7451 sigchld_action.sa_handler = sigchld_handler;
7452 sigemptyset (&sigchld_action.sa_mask);
7453 sigchld_action.sa_flags = SA_RESTART;
7454 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
7455
7456 initialize_low_arch ();
89245bc0
DB
7457
7458 linux_check_ptrace_features ();
da6d8c04 7459}
This page took 2.424179 seconds and 4 git commands to generate.