Automatic date update in version.in
[deliverable/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
3666a048 2 Copyright (C) 1995-2021 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
fd462a61
DJ
63#ifndef O_LARGEFILE
64#define O_LARGEFILE 0
65#endif
1a981360 66
69f4c9cc
AH
67#ifndef AT_HWCAP2
68#define AT_HWCAP2 26
69#endif
70
db0dfaa0
LM
71/* Some targets did not define these ptrace constants from the start,
72 so gdbserver defines them locally here. In the future, these may
73 be removed after they are added to asm/ptrace.h. */
74#if !(defined(PT_TEXT_ADDR) \
75 || defined(PT_DATA_ADDR) \
76 || defined(PT_TEXT_END_ADDR))
77#if defined(__mcoldfire__)
78/* These are still undefined in 3.10 kernels. */
79#define PT_TEXT_ADDR 49*4
80#define PT_DATA_ADDR 50*4
81#define PT_TEXT_END_ADDR 51*4
db0dfaa0
LM
82/* These are still undefined in 3.10 kernels. */
83#elif defined(__TMS320C6X__)
84#define PT_TEXT_ADDR (0x10000*4)
85#define PT_DATA_ADDR (0x10004*4)
86#define PT_TEXT_END_ADDR (0x10008*4)
87#endif
88#endif
89
5203ae1e
TBA
90#if (defined(__UCLIBC__) \
91 && defined(HAS_NOMMU) \
92 && defined(PT_TEXT_ADDR) \
93 && defined(PT_DATA_ADDR) \
94 && defined(PT_TEXT_END_ADDR))
95#define SUPPORTS_READ_OFFSETS
96#endif
97
9accd112 98#ifdef HAVE_LINUX_BTRACE
125f8a3d 99# include "nat/linux-btrace.h"
268a13a5 100# include "gdbsupport/btrace-common.h"
9accd112
MM
101#endif
102
8365dcf5
TJB
103#ifndef HAVE_ELF32_AUXV_T
104/* Copied from glibc's elf.h. */
105typedef struct
106{
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We use to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115} Elf32_auxv_t;
116#endif
117
118#ifndef HAVE_ELF64_AUXV_T
119/* Copied from glibc's elf.h. */
120typedef struct
121{
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We use to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130} Elf64_auxv_t;
131#endif
132
ded48a5e
YQ
133/* Does the current host support PTRACE_GETREGSET? */
134int have_ptrace_getregset = -1;
135
cff068da
GB
136/* LWP accessors. */
137
138/* See nat/linux-nat.h. */
139
140ptid_t
141ptid_of_lwp (struct lwp_info *lwp)
142{
143 return ptid_of (get_lwp_thread (lwp));
144}
145
146/* See nat/linux-nat.h. */
147
4b134ca1
GB
void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Attach backend-specific per-LWP state to LWP.  The data is later
     released through low_delete_thread when the LWP is deleted.  */
  lwp->arch_private = info;
}
154
155/* See nat/linux-nat.h. */
156
struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Return the backend-specific per-LWP state previously stored with
     lwp_set_arch_private_info, or NULL if none was set.  */
  return lwp->arch_private;
}
162
163/* See nat/linux-nat.h. */
164
cff068da
GB
int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Non-zero when GDBserver considers LWP stopped (the `stopped'
     flag maintained by the low-level wait machinery).  */
  return lwp->stopped;
}
170
171/* See nat/linux-nat.h. */
172
enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Return the recorded reason LWP last stopped (set by
     save_stop_reason).  */
  return lwp->stop_reason;
}
178
0e00e962
AA
179/* See nat/linux-nat.h. */
180
int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Non-zero if LWP is currently being single-stepped.  */
  return lwp->stepping;
}
186
05044653
PA
187/* A list of all unknown processes which receive stop signals. Some
188 other process will presumably claim each of these as forked
189 children momentarily. */
24a09b5f 190
05044653
PA
/* Node of the singly-linked stopped_pids list below; one entry per
   not-yet-claimed stopped child.  */
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
05c309a8 202static struct simple_pid_list *stopped_pids;
05044653
PA
203
204/* Trivial list manipulation functions to keep track of a list of new
205 stopped processes. */
206
207static void
208add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
209{
8d749320 210 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
211
212 new_pid->pid = pid;
213 new_pid->status = status;
214 new_pid->next = *listp;
215 *listp = new_pid;
216}
217
218static int
219pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
220{
221 struct simple_pid_list **p;
222
223 for (p = listp; *p != NULL; p = &(*p)->next)
224 if ((*p)->pid == pid)
225 {
226 struct simple_pid_list *next = (*p)->next;
227
228 *statusp = (*p)->status;
229 xfree (*p);
230 *p = next;
231 return 1;
232 }
233 return 0;
234}
24a09b5f 235
bde24c0a
PA
/* Mode of the current stop-all-threads operation, kept in the
   file-scope `stopping_threads' variable.  */
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads (new clone/fork children are
       left suspended too, see handle_extended_wait).  */
    STOPPING_AND_SUSPENDING_THREADS
  };
247
248/* This is set while stop_all_lwps is in effect. */
6bd434d6 249static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
250
251/* FIXME make into a target method? */
24a09b5f 252int using_threads = 1;
24a09b5f 253
fa593d66
PA
254/* True if we're presently stabilizing threads (moving them out of
255 jump pads). */
256static int stabilizing_threads;
257
f50bf8e5 258static void unsuspend_all_lwps (struct lwp_info *except);
95954743 259static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 260static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 261static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 262static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 263static int linux_low_ptrace_options (int attached);
ced2dffb 264static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 265
582511be
PA
266/* When the event-loop is doing a step-over, this points at the thread
267 being stepped. */
6bd434d6 268static ptid_t step_over_bkpt;
582511be 269
bf9ae9d8
TBA
bool
linux_process_target::low_supports_breakpoints ()
{
  /* Base implementation; architecture backends that implement
     low_get_pc/low_set_pc override this to return true.  Callers such
     as get_pc and save_stop_reason bail out early when false.  */
  return false;
}
d50171e4 275
bf9ae9d8
TBA
CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  /* Default: no PC is available.  Only meaningful when the backend
     also overrides low_supports_breakpoints to return true.  */
  return 0;
}
281
void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  /* Must be overridden by any backend whose low_supports_breakpoints
     returns true; reaching the base implementation is a bug.  */
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 287
7582c77c
TBA
std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  /* Only backends that support software single-step provide this;
     reaching the base implementation is a bug.  */
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
294
d4807ea2
TBA
int
linux_process_target::low_decr_pc_after_break ()
{
  /* Amount to subtract from the PC after a software breakpoint trap
     to obtain the breakpoint address (see save_stop_reason).  Default
     0: the PC is presumed left at the breakpoint address.  */
  return 0;
}
300
c2d6af84
PA
301/* True if LWP is stopped in its stepping range. */
302
303static int
304lwp_in_step_range (struct lwp_info *lwp)
305{
306 CORE_ADDR pc = lwp->stop_pc;
307
308 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
309}
310
bd99dc85
PA
311/* The read/write ends of the pipe registered as waitable file in the
312 event loop. */
313static int linux_event_pipe[2] = { -1, -1 };
314
315/* True if we're currently in async mode. */
316#define target_is_async_p() (linux_event_pipe[0] != -1)
317
02fc4de7 318static void send_sigstop (struct lwp_info *lwp);
bd99dc85 319
d0722149
DE
320/* Return non-zero if HEADER is a 64-bit ELF file. */
321
static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Anything without the four ELF magic bytes is not an ELF image at
     all: report EM_NONE and the distinct -1 result.  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  /* Valid ELF: hand back the machine and whether it is 64-bit.  */
  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
337
338/* Return non-zero if FILE is a 64-bit ELF file,
339 zero if the file is not a 64-bit ELF file,
340 and -1 if the file is not accessible or doesn't exist. */
341
be07f1a2 342static int
214d508e 343elf_64_file_p (const char *file, unsigned int *machine)
d0722149 344{
957f3f49 345 Elf64_Ehdr header;
d0722149
DE
346 int fd;
347
348 fd = open (file, O_RDONLY);
349 if (fd < 0)
350 return -1;
351
352 if (read (fd, &header, sizeof (header)) != sizeof (header))
353 {
354 close (fd);
355 return 0;
356 }
357 close (fd);
358
214d508e 359 return elf_64_header_p (&header, machine);
d0722149
DE
360}
361
be07f1a2
PA
362/* Accepts an integer PID; Returns true if the executable PID is
363 running is a 64-bit ELF file.. */
364
365int
214d508e 366linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 367{
d8d2a3ee 368 char file[PATH_MAX];
be07f1a2
PA
369
370 sprintf (file, "/proc/%d/exe", pid);
214d508e 371 return elf_64_file_p (file, machine);
be07f1a2
PA
372}
373
fd000fb3
TBA
374void
375linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 376{
fa96cb38
PA
377 struct thread_info *thr = get_lwp_thread (lwp);
378
379 if (debug_threads)
380 debug_printf ("deleting %ld\n", lwpid_of (thr));
381
382 remove_thread (thr);
466eecee 383
fd000fb3 384 low_delete_thread (lwp->arch_private);
466eecee 385
013e3554 386 delete lwp;
bd99dc85
PA
387}
388
fd000fb3
TBA
void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used; the base version may only ever see a null
     pointer, so anything else indicates a missing override.  */
  gdb_assert (info == nullptr);
}
95954743 396
fd000fb3
TBA
397process_info *
398linux_process_target::add_linux_process (int pid, int attached)
95954743
PA
399{
400 struct process_info *proc;
401
95954743 402 proc = add_process (pid, attached);
8d749320 403 proc->priv = XCNEW (struct process_info_private);
95954743 404
fd000fb3 405 proc->priv->arch_private = low_new_process ();
aa5ca48f 406
95954743
PA
407 return proc;
408}
409
fd000fb3
TBA
arch_process_info *
linux_process_target::low_new_process ()
{
  /* Default: no architecture-specific per-process data.  */
  return nullptr;
}
415
void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists; the base version may only ever be handed the null
     pointer produced by the base low_new_process.  */
  gdb_assert (info == nullptr);
}
423
void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop by default; backends override this to clone arch-specific
     process data on fork/vfork (see handle_extended_wait).  */
}
429
797bcff5
TBA
430void
431linux_process_target::arch_setup_thread (thread_info *thread)
94585166
DB
432{
433 struct thread_info *saved_thread;
434
435 saved_thread = current_thread;
436 current_thread = thread;
437
797bcff5 438 low_arch_setup ();
94585166
DB
439
440 current_thread = saved_thread;
441}
442
d16f3f6c
TBA
/* Handle the ptrace extended event encoded in wait status WSTAT for
   the LWP in *ORIG_EVENT_LWP.  Returns 0 if the event should be
   reported to the client (possibly replacing *ORIG_EVENT_LWP, as in
   the exec case), or 1 if it was consumed internally and must not be
   reported (plain clone without thread-event reporting).  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  /* The child gets its own copy of the parent's target
	     description.  */
	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      /* PTRACE_EVENT_CLONE: a new thread in an existing process.  */
      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  /* The vfork child is done: put back the single-step
	     breakpoints removed from the parent at vfork time.  */
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
713
df95181f
TBA
714CORE_ADDR
715linux_process_target::get_pc (lwp_info *lwp)
d50171e4 716{
0bfdf32f 717 struct thread_info *saved_thread;
d50171e4
PA
718 struct regcache *regcache;
719 CORE_ADDR pc;
720
bf9ae9d8 721 if (!low_supports_breakpoints ())
d50171e4
PA
722 return 0;
723
0bfdf32f
GB
724 saved_thread = current_thread;
725 current_thread = get_lwp_thread (lwp);
d50171e4 726
0bfdf32f 727 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 728 pc = low_get_pc (regcache);
d50171e4
PA
729
730 if (debug_threads)
87ce2a04 731 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 732
0bfdf32f 733 current_thread = saved_thread;
d50171e4
PA
734 return pc;
735}
736
9eedd27d
TBA
737void
738linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2
JS
739{
740 struct thread_info *saved_thread;
741 struct regcache *regcache;
742
82075af2
JS
743 saved_thread = current_thread;
744 current_thread = get_lwp_thread (lwp);
745
746 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 747 low_get_syscall_trapinfo (regcache, sysno);
82075af2
JS
748
749 if (debug_threads)
4cc32bec 750 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
751
752 current_thread = saved_thread;
753}
754
9eedd27d
TBA
void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number; backends that
     can read the syscall number from the registers override this.  */
  *sysno = UNKNOWN_SYSCALL;
}
761
df95181f
TBA
/* Determine why LWP last stopped and record the result in
   lwp->stop_reason and lwp->stop_pc (backing the PC up over a
   software breakpoint when needed).  Returns false when the target
   does not support breakpoints (nothing to examine), true
   otherwise.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Preferred path: let the kernel tell us via siginfo what kind of
     trap this was.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* Fallback without siginfo: infer the stop reason from what is
     inserted at the stop address.  We may have just stepped a
     breakpoint instruction.  E.g., in non-stop mode, GDB first tells
     the thread A to step a range, and then the user inserts a
     breakpoint inside the range.  In that case we need to report the
     breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}
ce3a066d 896
fd000fb3
TBA
897lwp_info *
898linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 899{
54a0b537 900 struct lwp_info *lwp;
0d62e5e8 901
013e3554 902 lwp = new lwp_info {};
00db26fa
PA
903
904 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 905
754e3168
AH
906 lwp->thread = add_thread (ptid, lwp);
907
fd000fb3 908 low_new_thread (lwp);
aa5ca48f 909
54a0b537 910 return lwp;
0d62e5e8 911}
611cb4a5 912
fd000fb3
TBA
void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop by default; backends override this to set up per-thread
     architecture state when an LWP is added.  */
}
918
2090129c
SDJ
919/* Callback to be used when calling fork_inferior, responsible for
920 actually initiating the tracing of the inferior. */
921
static void
linux_ptrace_fun ()
{
  /* Runs in the forked child, before exec: make the child traceable
     and move it into its own process group.  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      /* close(0) then open() relies on open returning the lowest free
	 descriptor, i.e. the new fd 0 becomes /dev/null.  */
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
950
da6d8c04 951/* Start an inferior process and returns its pid.
2090129c
SDJ
952 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
953 are its arguments. */
da6d8c04 954
15295543
TBA
int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Optionally disable address space randomization only for the
       duration of the fork; the scope restores the old personality on
       exit.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    /* linux_ptrace_fun runs in the child and enables PTRACE_TRACEME
       before the exec.  */
    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* Record the new process and its initial LWP in our tables.  */
  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* Ptrace options are enabled later, in post_create_inferior.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
985
ece66d65
JS
986/* Implement the post_create_inferior target_ops method. */
987
6dee9afb
TBA
988void
989linux_process_target::post_create_inferior ()
ece66d65
JS
990{
991 struct lwp_info *lwp = get_thread_lwp (current_thread);
992
797bcff5 993 low_arch_setup ();
ece66d65
JS
994
995 if (lwp->must_set_ptrace_flags)
996 {
997 struct process_info *proc = current_process ();
998 int options = linux_low_ptrace_options (proc->attached);
999
1000 linux_enable_event_reporting (lwpid_of (current_thread), options);
1001 lwp->must_set_ptrace_flags = 0;
1002 }
1003}
1004
7ae1a6a6 1005int
fd000fb3 1006linux_process_target::attach_lwp (ptid_t ptid)
da6d8c04 1007{
54a0b537 1008 struct lwp_info *new_lwp;
e38504b3 1009 int lwpid = ptid.lwp ();
611cb4a5 1010
b8e1b30e 1011 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
56f7af9c 1012 != 0)
7ae1a6a6 1013 return errno;
24a09b5f 1014
b3312d80 1015 new_lwp = add_lwp (ptid);
0d62e5e8 1016
a6dbe5df
PA
1017 /* We need to wait for SIGSTOP before being able to make the next
1018 ptrace call on this LWP. */
1019 new_lwp->must_set_ptrace_flags = 1;
1020
644cebc9 1021 if (linux_proc_pid_is_stopped (lwpid))
c14d7ab2
PA
1022 {
1023 if (debug_threads)
87ce2a04 1024 debug_printf ("Attached to a stopped process\n");
c14d7ab2
PA
1025
1026 /* The process is definitely stopped. It is in a job control
1027 stop, unless the kernel predates the TASK_STOPPED /
1028 TASK_TRACED distinction, in which case it might be in a
1029 ptrace stop. Make sure it is in a ptrace stop; from there we
1030 can kill it, signal it, et cetera.
1031
1032 First make sure there is a pending SIGSTOP. Since we are
1033 already attached, the process can not transition from stopped
1034 to running without a PTRACE_CONT; so we know this signal will
1035 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1036 probably already in the queue (unless this kernel is old
1037 enough to use TASK_STOPPED for ptrace stops); but since
1038 SIGSTOP is not an RT signal, it can only be queued once. */
1039 kill_lwp (lwpid, SIGSTOP);
1040
1041 /* Finally, resume the stopped process. This will deliver the
1042 SIGSTOP (or a higher priority signal, just like normal
1043 PTRACE_ATTACH), which we'll catch later on. */
b8e1b30e 1044 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
c14d7ab2
PA
1045 }
1046
0d62e5e8 1047 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
1048 brings it to a halt.
1049
1050 There are several cases to consider here:
1051
1052 1) gdbserver has already attached to the process and is being notified
1b3f6016 1053 of a new thread that is being created.
d50171e4
PA
1054 In this case we should ignore that SIGSTOP and resume the
1055 process. This is handled below by setting stop_expected = 1,
8336d594 1056 and the fact that add_thread sets last_resume_kind ==
d50171e4 1057 resume_continue.
0e21c1ec
DE
1058
1059 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
1060 to it via attach_inferior.
1061 In this case we want the process thread to stop.
d50171e4
PA
1062 This is handled by having linux_attach set last_resume_kind ==
1063 resume_stop after we return.
e3deef73
LM
1064
1065 If the pid we are attaching to is also the tgid, we attach to and
1066 stop all the existing threads. Otherwise, we attach to pid and
1067 ignore any other threads in the same group as this pid.
0e21c1ec
DE
1068
1069 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
1070 existing threads.
1071 In this case we want the thread to stop.
1072 FIXME: This case is currently not properly handled.
1073 We should wait for the SIGSTOP but don't. Things work apparently
1074 because enough time passes between when we ptrace (ATTACH) and when
1075 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
1076
1077 On the other hand, if we are currently trying to stop all threads, we
1078 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 1079 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
1080 end of the list, and so the new thread has not yet reached
1081 wait_for_sigstop (but will). */
d50171e4 1082 new_lwp->stop_expected = 1;
0d62e5e8 1083
7ae1a6a6 1084 return 0;
95954743
PA
1085}
1086
8784d563
PA
1087/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1088 already attached. Returns true if a new LWP is found, false
1089 otherwise. */
1090
1091static int
1092attach_proc_task_lwp_callback (ptid_t ptid)
1093{
1094 /* Is this a new thread? */
1095 if (find_thread_ptid (ptid) == NULL)
1096 {
e38504b3 1097 int lwpid = ptid.lwp ();
8784d563
PA
1098 int err;
1099
1100 if (debug_threads)
1101 debug_printf ("Found new lwp %d\n", lwpid);
1102
fd000fb3 1103 err = the_linux_target->attach_lwp (ptid);
8784d563
PA
1104
1105 /* Be quiet if we simply raced with the thread exiting. EPERM
1106 is returned if the thread's task still exists, and is marked
1107 as exited or zombie, as well as other conditions, so in that
1108 case, confirm the status in /proc/PID/status. */
1109 if (err == ESRCH
1110 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1111 {
1112 if (debug_threads)
1113 {
1114 debug_printf ("Cannot attach to lwp %d: "
1115 "thread is gone (%d: %s)\n",
6d91ce9a 1116 lwpid, err, safe_strerror (err));
8784d563
PA
1117 }
1118 }
1119 else if (err != 0)
1120 {
4d9b86e1 1121 std::string reason
50fa3001 1122 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1
SM
1123
1124 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1125 }
1126
1127 return 1;
1128 }
1129 return 0;
1130}
1131
500c1d85
PA
1132static void async_file_mark (void);
1133
e3deef73
LM
1134/* Attach to PID. If PID is the tgid, attach to it and all
1135 of its threads. */
1136
ef03dad8
TBA
1137int
1138linux_process_target::attach (unsigned long pid)
0d62e5e8 1139{
500c1d85
PA
1140 struct process_info *proc;
1141 struct thread_info *initial_thread;
fd79271b 1142 ptid_t ptid = ptid_t (pid, pid, 0);
7ae1a6a6
PA
1143 int err;
1144
fd000fb3 1145 proc = add_linux_process (pid, 1);
df0da8a2 1146
e3deef73
LM
1147 /* Attach to PID. We will check for other threads
1148 soon. */
fd000fb3 1149 err = attach_lwp (ptid);
7ae1a6a6 1150 if (err != 0)
4d9b86e1 1151 {
df0da8a2 1152 remove_process (proc);
4d9b86e1 1153
50fa3001
SDJ
1154 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1155 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
4d9b86e1 1156 }
7ae1a6a6 1157
500c1d85
PA
1158 /* Don't ignore the initial SIGSTOP if we just attached to this
1159 process. It will be collected by wait shortly. */
fd79271b 1160 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
500c1d85 1161 initial_thread->last_resume_kind = resume_stop;
0d62e5e8 1162
8784d563
PA
1163 /* We must attach to every LWP. If /proc is mounted, use that to
1164 find them now. On the one hand, the inferior may be using raw
1165 clone instead of using pthreads. On the other hand, even if it
1166 is using pthreads, GDB may not be connected yet (thread_db needs
1167 to do symbol lookups, through qSymbol). Also, thread_db walks
1168 structures in the inferior's address space to find the list of
1169 threads/LWPs, and those structures may well be corrupted. Note
1170 that once thread_db is loaded, we'll still use it to list threads
1171 and associate pthread info with each LWP. */
1172 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
500c1d85
PA
1173
1174 /* GDB will shortly read the xml target description for this
1175 process, to figure out the process' architecture. But the target
1176 description is only filled in when the first process/thread in
1177 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1178 that now, otherwise, if GDB is fast enough, it could read the
1179 target description _before_ that initial stop. */
1180 if (non_stop)
1181 {
1182 struct lwp_info *lwp;
1183 int wstat, lwpid;
f2907e49 1184 ptid_t pid_ptid = ptid_t (pid);
500c1d85 1185
d16f3f6c 1186 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
500c1d85
PA
1187 gdb_assert (lwpid > 0);
1188
f2907e49 1189 lwp = find_lwp_pid (ptid_t (lwpid));
500c1d85
PA
1190
1191 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1192 {
1193 lwp->status_pending_p = 1;
1194 lwp->status_pending = wstat;
1195 }
1196
1197 initial_thread->last_resume_kind = resume_continue;
1198
1199 async_file_mark ();
1200
1201 gdb_assert (proc->tdesc != NULL);
1202 }
1203
95954743
PA
1204 return 0;
1205}
1206
95954743 1207static int
e4eb0dec 1208last_thread_of_process_p (int pid)
95954743 1209{
e4eb0dec 1210 bool seen_one = false;
95954743 1211
da4ae14a 1212 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1213 {
e4eb0dec
SM
1214 if (!seen_one)
1215 {
1216 /* This is the first thread of this process we see. */
1217 seen_one = true;
1218 return false;
1219 }
1220 else
1221 {
1222 /* This is the second thread of this process we see. */
1223 return true;
1224 }
1225 });
da6d8c04 1226
e4eb0dec 1227 return thread == NULL;
95954743
PA
1228}
1229
da84f473
PA
1230/* Kill LWP. */
1231
1232static void
1233linux_kill_one_lwp (struct lwp_info *lwp)
1234{
d86d4aaf
DE
1235 struct thread_info *thr = get_lwp_thread (lwp);
1236 int pid = lwpid_of (thr);
da84f473
PA
1237
1238 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1239 there is no signal context, and ptrace(PTRACE_KILL) (or
1240 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1241 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1242 alternative is to kill with SIGKILL. We only need one SIGKILL
1243 per process, not one for each thread. But since we still support
4a6ed09b
PA
1244 support debugging programs using raw clone without CLONE_THREAD,
1245 we send one for each thread. For years, we used PTRACE_KILL
1246 only, so we're being a bit paranoid about some old kernels where
1247 PTRACE_KILL might work better (dubious if there are any such, but
1248 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1249 second, and so we're fine everywhere. */
da84f473
PA
1250
1251 errno = 0;
69ff6be5 1252 kill_lwp (pid, SIGKILL);
da84f473 1253 if (debug_threads)
ce9e3fe7
PA
1254 {
1255 int save_errno = errno;
1256
1257 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1258 target_pid_to_str (ptid_of (thr)),
6d91ce9a 1259 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1260 }
da84f473
PA
1261
1262 errno = 0;
b8e1b30e 1263 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 1264 if (debug_threads)
ce9e3fe7
PA
1265 {
1266 int save_errno = errno;
1267
1268 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1269 target_pid_to_str (ptid_of (thr)),
6d91ce9a 1270 save_errno ? safe_strerror (save_errno) : "OK");
ce9e3fe7 1271 }
da84f473
PA
1272}
1273
e76126e8
PA
1274/* Kill LWP and wait for it to die. */
1275
1276static void
1277kill_wait_lwp (struct lwp_info *lwp)
1278{
1279 struct thread_info *thr = get_lwp_thread (lwp);
e99b03dc 1280 int pid = ptid_of (thr).pid ();
e38504b3 1281 int lwpid = ptid_of (thr).lwp ();
e76126e8
PA
1282 int wstat;
1283 int res;
1284
1285 if (debug_threads)
1286 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1287
1288 do
1289 {
1290 linux_kill_one_lwp (lwp);
1291
1292 /* Make sure it died. Notes:
1293
1294 - The loop is most likely unnecessary.
1295
d16f3f6c 1296 - We don't use wait_for_event as that could delete lwps
e76126e8
PA
1297 while we're iterating over them. We're not interested in
1298 any pending status at this point, only in making sure all
1299 wait status on the kernel side are collected until the
1300 process is reaped.
1301
1302 - We don't use __WALL here as the __WALL emulation relies on
1303 SIGCHLD, and killing a stopped process doesn't generate
1304 one, nor an exit status.
1305 */
1306 res = my_waitpid (lwpid, &wstat, 0);
1307 if (res == -1 && errno == ECHILD)
1308 res = my_waitpid (lwpid, &wstat, __WCLONE);
1309 } while (res > 0 && WIFSTOPPED (wstat));
1310
586b02a9
PA
1311 /* Even if it was stopped, the child may have already disappeared.
1312 E.g., if it was killed by SIGKILL. */
1313 if (res < 0 && errno != ECHILD)
1314 perror_with_name ("kill_wait_lwp");
e76126e8
PA
1315}
1316
578290ec 1317/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1318 except the leader. */
95954743 1319
578290ec
SM
1320static void
1321kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1322{
54a0b537 1323 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1324
fd500816
DJ
1325 /* We avoid killing the first thread here, because of a Linux kernel (at
1326 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1327 the children get a chance to be reaped, it will remain a zombie
1328 forever. */
95954743 1329
d86d4aaf 1330 if (lwpid_of (thread) == pid)
95954743
PA
1331 {
1332 if (debug_threads)
87ce2a04 1333 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1334 target_pid_to_str (thread->id));
578290ec 1335 return;
95954743 1336 }
fd500816 1337
e76126e8 1338 kill_wait_lwp (lwp);
da6d8c04
DJ
1339}
1340
c6885a57
TBA
1341int
1342linux_process_target::kill (process_info *process)
0d62e5e8 1343{
a780ef4f 1344 int pid = process->pid;
9d606399 1345
f9e39928
PA
1346 /* If we're killing a running inferior, make sure it is stopped
1347 first, as PTRACE_KILL will not work otherwise. */
7984d532 1348 stop_all_lwps (0, NULL);
f9e39928 1349
578290ec
SM
1350 for_each_thread (pid, [&] (thread_info *thread)
1351 {
1352 kill_one_lwp_callback (thread, pid);
1353 });
fd500816 1354
54a0b537 1355 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1356 thread in the list, so do so now. */
a780ef4f 1357 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
bd99dc85 1358
784867a5 1359 if (lwp == NULL)
fd500816 1360 {
784867a5 1361 if (debug_threads)
d86d4aaf
DE
1362 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1363 pid);
784867a5
JK
1364 }
1365 else
e76126e8 1366 kill_wait_lwp (lwp);
2d717e4f 1367
8adb37b9 1368 mourn (process);
f9e39928
PA
1369
1370 /* Since we presently can only stop all lwps of all processes, we
1371 need to unstop lwps of other processes. */
7984d532 1372 unstop_all_lwps (0, NULL);
95954743 1373 return 0;
0d62e5e8
DJ
1374}
1375
9b224c5e
PA
1376/* Get pending signal of THREAD, for detaching purposes. This is the
1377 signal the thread last stopped for, which we need to deliver to the
1378 thread when detaching, otherwise, it'd be suppressed/lost. */
1379
1380static int
1381get_detach_signal (struct thread_info *thread)
1382{
c12a5089 1383 client_state &cs = get_client_state ();
a493e3e2 1384 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1385 int status;
1386 struct lwp_info *lp = get_thread_lwp (thread);
1387
1388 if (lp->status_pending_p)
1389 status = lp->status_pending;
1390 else
1391 {
1392 /* If the thread had been suspended by gdbserver, and it stopped
1393 cleanly, then it'll have stopped with SIGSTOP. But we don't
1394 want to deliver that SIGSTOP. */
1395 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
a493e3e2 1396 || thread->last_status.value.sig == GDB_SIGNAL_0)
9b224c5e
PA
1397 return 0;
1398
1399 /* Otherwise, we may need to deliver the signal we
1400 intercepted. */
1401 status = lp->last_status;
1402 }
1403
1404 if (!WIFSTOPPED (status))
1405 {
1406 if (debug_threads)
87ce2a04 1407 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
d86d4aaf 1408 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1409 return 0;
1410 }
1411
1412 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1413 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e
PA
1414 {
1415 if (debug_threads)
87ce2a04
DE
1416 debug_printf ("GPS: lwp %s had stopped with extended "
1417 "status: no pending signal\n",
d86d4aaf 1418 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1419 return 0;
1420 }
1421
2ea28649 1422 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e 1423
c12a5089 1424 if (cs.program_signals_p && !cs.program_signals[signo])
9b224c5e
PA
1425 {
1426 if (debug_threads)
87ce2a04 1427 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
d86d4aaf 1428 target_pid_to_str (ptid_of (thread)),
87ce2a04 1429 gdb_signal_to_string (signo));
9b224c5e
PA
1430 return 0;
1431 }
c12a5089 1432 else if (!cs.program_signals_p
9b224c5e
PA
1433 /* If we have no way to know which signals GDB does not
1434 want to have passed to the program, assume
1435 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1436 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e
PA
1437 {
1438 if (debug_threads)
87ce2a04
DE
1439 debug_printf ("GPS: lwp %s had signal %s, "
1440 "but we don't know if we should pass it. "
1441 "Default to not.\n",
d86d4aaf 1442 target_pid_to_str (ptid_of (thread)),
87ce2a04 1443 gdb_signal_to_string (signo));
9b224c5e
PA
1444 return 0;
1445 }
1446 else
1447 {
1448 if (debug_threads)
87ce2a04 1449 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
d86d4aaf 1450 target_pid_to_str (ptid_of (thread)),
87ce2a04 1451 gdb_signal_to_string (signo));
9b224c5e
PA
1452
1453 return WSTOPSIG (status);
1454 }
1455}
1456
fd000fb3
TBA
1457void
1458linux_process_target::detach_one_lwp (lwp_info *lwp)
6ad8ae5c 1459{
ced2dffb 1460 struct thread_info *thread = get_lwp_thread (lwp);
9b224c5e 1461 int sig;
ced2dffb 1462 int lwpid;
6ad8ae5c 1463
9b224c5e 1464 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1465 if (lwp->stop_expected)
ae13219e 1466 {
9b224c5e 1467 if (debug_threads)
87ce2a04 1468 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1469 target_pid_to_str (ptid_of (thread)));
9b224c5e 1470
d86d4aaf 1471 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1472 lwp->stop_expected = 0;
ae13219e
DJ
1473 }
1474
9b224c5e
PA
1475 /* Pass on any pending signal for this thread. */
1476 sig = get_detach_signal (thread);
1477
ced2dffb
PA
1478 /* Preparing to resume may try to write registers, and fail if the
1479 lwp is zombie. If that happens, ignore the error. We'll handle
1480 it below, when detach fails with ESRCH. */
a70b8144 1481 try
ced2dffb
PA
1482 {
1483 /* Flush any pending changes to the process's registers. */
1484 regcache_invalidate_thread (thread);
1485
1486 /* Finally, let it resume. */
d7599cc0 1487 low_prepare_to_resume (lwp);
ced2dffb 1488 }
230d2906 1489 catch (const gdb_exception_error &ex)
ced2dffb
PA
1490 {
1491 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 1492 throw;
ced2dffb 1493 }
ced2dffb
PA
1494
1495 lwpid = lwpid_of (thread);
1496 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1497 (PTRACE_TYPE_ARG4) (long) sig) < 0)
ced2dffb
PA
1498 {
1499 int save_errno = errno;
1500
1501 /* We know the thread exists, so ESRCH must mean the lwp is
1502 zombie. This can happen if one of the already-detached
1503 threads exits the whole thread group. In that case we're
1504 still attached, and must reap the lwp. */
1505 if (save_errno == ESRCH)
1506 {
1507 int ret, status;
1508
1509 ret = my_waitpid (lwpid, &status, __WALL);
1510 if (ret == -1)
1511 {
1512 warning (_("Couldn't reap LWP %d while detaching: %s"),
6d91ce9a 1513 lwpid, safe_strerror (errno));
ced2dffb
PA
1514 }
1515 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1516 {
1517 warning (_("Reaping LWP %d while detaching "
1518 "returned unexpected status 0x%x"),
1519 lwpid, status);
1520 }
1521 }
1522 else
1523 {
1524 error (_("Can't detach %s: %s"),
1525 target_pid_to_str (ptid_of (thread)),
6d91ce9a 1526 safe_strerror (save_errno));
ced2dffb
PA
1527 }
1528 }
1529 else if (debug_threads)
1530 {
1531 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1532 target_pid_to_str (ptid_of (thread)),
1533 strsignal (sig));
1534 }
bd99dc85
PA
1535
1536 delete_lwp (lwp);
ced2dffb
PA
1537}
1538
9061c9cf
TBA
1539int
1540linux_process_target::detach (process_info *process)
95954743 1541{
ced2dffb 1542 struct lwp_info *main_lwp;
95954743 1543
863d01bd
PA
1544 /* As there's a step over already in progress, let it finish first,
1545 otherwise nesting a stabilize_threads operation on top gets real
1546 messy. */
1547 complete_ongoing_step_over ();
1548
f9e39928 1549 /* Stop all threads before detaching. First, ptrace requires that
30baf67b 1550 the thread is stopped to successfully detach. Second, thread_db
f9e39928
PA
1551 may need to uninstall thread event breakpoints from memory, which
1552 only works with a stopped process anyway. */
7984d532 1553 stop_all_lwps (0, NULL);
f9e39928 1554
ca5c370d 1555#ifdef USE_THREAD_DB
8336d594 1556 thread_db_detach (process);
ca5c370d
PA
1557#endif
1558
fa593d66 1559 /* Stabilize threads (move out of jump pads). */
5c9eb2f2 1560 target_stabilize_threads ();
fa593d66 1561
ced2dffb
PA
1562 /* Detach from the clone lwps first. If the thread group exits just
1563 while we're detaching, we must reap the clone lwps before we're
1564 able to reap the leader. */
fd000fb3
TBA
1565 for_each_thread (process->pid, [this] (thread_info *thread)
1566 {
1567 /* We don't actually detach from the thread group leader just yet.
1568 If the thread group exits, we must reap the zombie clone lwps
1569 before we're able to reap the leader. */
1570 if (thread->id.pid () == thread->id.lwp ())
1571 return;
1572
1573 lwp_info *lwp = get_thread_lwp (thread);
1574 detach_one_lwp (lwp);
1575 });
ced2dffb 1576
ef2ddb33 1577 main_lwp = find_lwp_pid (ptid_t (process->pid));
fd000fb3 1578 detach_one_lwp (main_lwp);
8336d594 1579
8adb37b9 1580 mourn (process);
f9e39928
PA
1581
1582 /* Since we presently can only stop all lwps of all processes, we
1583 need to unstop lwps of other processes. */
7984d532 1584 unstop_all_lwps (0, NULL);
f9e39928
PA
1585 return 0;
1586}
1587
1588/* Remove all LWPs that belong to process PROC from the lwp list. */
1589
8adb37b9
TBA
1590void
1591linux_process_target::mourn (process_info *process)
8336d594
PA
1592{
1593 struct process_info_private *priv;
1594
1595#ifdef USE_THREAD_DB
1596 thread_db_mourn (process);
1597#endif
1598
fd000fb3 1599 for_each_thread (process->pid, [this] (thread_info *thread)
6b2a85da
SM
1600 {
1601 delete_lwp (get_thread_lwp (thread));
1602 });
f9e39928 1603
8336d594 1604 /* Freeing all private data. */
fe978cb0 1605 priv = process->priv;
fd000fb3 1606 low_delete_process (priv->arch_private);
8336d594 1607 free (priv);
fe978cb0 1608 process->priv = NULL;
505106cd
PA
1609
1610 remove_process (process);
8336d594
PA
1611}
1612
95a49a39
TBA
1613void
1614linux_process_target::join (int pid)
444d6139 1615{
444d6139
PA
1616 int status, ret;
1617
1618 do {
d105de22 1619 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1620 if (WIFEXITED (status) || WIFSIGNALED (status))
1621 break;
1622 } while (ret != -1 || errno != ECHILD);
1623}
1624
13d3d99b
TBA
1625/* Return true if the given thread is still alive. */
1626
1627bool
1628linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1629{
95954743
PA
1630 struct lwp_info *lwp = find_lwp_pid (ptid);
1631
1632 /* We assume we always know if a thread exits. If a whole process
1633 exited but we still haven't been able to report it to GDB, we'll
1634 hold on to the last lwp of the dead process. */
1635 if (lwp != NULL)
00db26fa 1636 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1637 else
1638 return 0;
1639}
1640
df95181f
TBA
1641bool
1642linux_process_target::thread_still_has_status_pending (thread_info *thread)
582511be
PA
1643{
1644 struct lwp_info *lp = get_thread_lwp (thread);
1645
1646 if (!lp->status_pending_p)
1647 return 0;
1648
582511be 1649 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1650 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1651 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be
PA
1652 {
1653 struct thread_info *saved_thread;
1654 CORE_ADDR pc;
1655 int discard = 0;
1656
1657 gdb_assert (lp->last_status != 0);
1658
1659 pc = get_pc (lp);
1660
1661 saved_thread = current_thread;
1662 current_thread = thread;
1663
1664 if (pc != lp->stop_pc)
1665 {
1666 if (debug_threads)
1667 debug_printf ("PC of %ld changed\n",
1668 lwpid_of (thread));
1669 discard = 1;
1670 }
3e572f71
PA
1671
1672#if !USE_SIGTRAP_SIGINFO
15c66dd6 1673 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
d7146cda 1674 && !low_breakpoint_at (pc))
582511be
PA
1675 {
1676 if (debug_threads)
1677 debug_printf ("previous SW breakpoint of %ld gone\n",
1678 lwpid_of (thread));
1679 discard = 1;
1680 }
15c66dd6 1681 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1682 && !hardware_breakpoint_inserted_here (pc))
1683 {
1684 if (debug_threads)
1685 debug_printf ("previous HW breakpoint of %ld gone\n",
1686 lwpid_of (thread));
1687 discard = 1;
1688 }
3e572f71 1689#endif
582511be
PA
1690
1691 current_thread = saved_thread;
1692
1693 if (discard)
1694 {
1695 if (debug_threads)
1696 debug_printf ("discarding pending breakpoint status\n");
1697 lp->status_pending_p = 0;
1698 return 0;
1699 }
1700 }
1701
1702 return 1;
1703}
1704
a681f9c9
PA
1705/* Returns true if LWP is resumed from the client's perspective. */
1706
1707static int
1708lwp_resumed (struct lwp_info *lwp)
1709{
1710 struct thread_info *thread = get_lwp_thread (lwp);
1711
1712 if (thread->last_resume_kind != resume_stop)
1713 return 1;
1714
1715 /* Did gdb send us a `vCont;t', but we haven't reported the
1716 corresponding stop to gdb yet? If so, the thread is still
1717 resumed/running from gdb's perspective. */
1718 if (thread->last_resume_kind == resume_stop
1719 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1720 return 1;
1721
1722 return 0;
1723}
1724
df95181f
TBA
1725bool
1726linux_process_target::status_pending_p_callback (thread_info *thread,
1727 ptid_t ptid)
0d62e5e8 1728{
582511be 1729 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1730
1731 /* Check if we're only interested in events from a specific process
afa8d396 1732 or a specific LWP. */
83e1b6c1 1733 if (!thread->id.matches (ptid))
95954743 1734 return 0;
0d62e5e8 1735
a681f9c9
PA
1736 if (!lwp_resumed (lp))
1737 return 0;
1738
582511be 1739 if (lp->status_pending_p
df95181f 1740 && !thread_still_has_status_pending (thread))
582511be 1741 {
df95181f 1742 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1743 return 0;
1744 }
0d62e5e8 1745
582511be 1746 return lp->status_pending_p;
0d62e5e8
DJ
1747}
1748
95954743
PA
1749struct lwp_info *
1750find_lwp_pid (ptid_t ptid)
1751{
da4ae14a 1752 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1753 {
1754 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1755 return thr_arg->id.lwp () == lwp;
454296a2 1756 });
d86d4aaf
DE
1757
1758 if (thread == NULL)
1759 return NULL;
1760
9c80ecd6 1761 return get_thread_lwp (thread);
95954743
PA
1762}
1763
fa96cb38 1764/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1765
fa96cb38
PA
1766static int
1767num_lwps (int pid)
1768{
fa96cb38 1769 int count = 0;
0d62e5e8 1770
4d3bb80e
SM
1771 for_each_thread (pid, [&] (thread_info *thread)
1772 {
9c80ecd6 1773 count++;
4d3bb80e 1774 });
3aee8918 1775
fa96cb38
PA
1776 return count;
1777}
d61ddec4 1778
6d4ee8c6
GB
1779/* See nat/linux-nat.h. */
1780
1781struct lwp_info *
1782iterate_over_lwps (ptid_t filter,
d3a70e03 1783 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1784{
da4ae14a 1785 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1786 {
da4ae14a 1787 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1788
d3a70e03 1789 return callback (lwp);
6d1e5673 1790 });
6d4ee8c6 1791
9c80ecd6 1792 if (thread == NULL)
6d4ee8c6
GB
1793 return NULL;
1794
9c80ecd6 1795 return get_thread_lwp (thread);
6d4ee8c6
GB
1796}
1797
fd000fb3
TBA
1798void
1799linux_process_target::check_zombie_leaders ()
fa96cb38 1800{
fd000fb3 1801 for_each_process ([this] (process_info *proc) {
9179355e
SM
1802 pid_t leader_pid = pid_of (proc);
1803 struct lwp_info *leader_lp;
1804
f2907e49 1805 leader_lp = find_lwp_pid (ptid_t (leader_pid));
9179355e
SM
1806
1807 if (debug_threads)
1808 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1809 "num_lwps=%d, zombie=%d\n",
1810 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1811 linux_proc_pid_is_zombie (leader_pid));
1812
1813 if (leader_lp != NULL && !leader_lp->stopped
1814 /* Check if there are other threads in the group, as we may
1815 have raced with the inferior simply exiting. */
1816 && !last_thread_of_process_p (leader_pid)
1817 && linux_proc_pid_is_zombie (leader_pid))
1818 {
1819 /* A leader zombie can mean one of two things:
1820
1821 - It exited, and there's an exit status pending
1822 available, or only the leader exited (not the whole
1823 program). In the latter case, we can't waitpid the
1824 leader's exit status until all other threads are gone.
1825
1826 - There are 3 or more threads in the group, and a thread
1827 other than the leader exec'd. On an exec, the Linux
1828 kernel destroys all other threads (except the execing
1829 one) in the thread group, and resets the execing thread's
1830 tid to the tgid. No exit notification is sent for the
1831 execing thread -- from the ptracer's perspective, it
1832 appears as though the execing thread just vanishes.
1833 Until we reap all other threads except the leader and the
1834 execing thread, the leader will be zombie, and the
1835 execing thread will be in `D (disc sleep)'. As soon as
1836 all other threads are reaped, the execing thread changes
1837 it's tid to the tgid, and the previous (zombie) leader
1838 vanishes, giving place to the "new" leader. We could try
1839 distinguishing the exit and exec cases, by waiting once
1840 more, and seeing if something comes out, but it doesn't
1841 sound useful. The previous leader _does_ go away, and
1842 we'll re-add the new one once we see the exec event
1843 (which is just the same as what would happen if the
1844 previous leader did exit voluntarily before some other
1845 thread execs). */
1846
1847 if (debug_threads)
1848 debug_printf ("CZL: Thread group leader %d zombie "
1849 "(it exited, or another thread execd).\n",
1850 leader_pid);
1851
1852 delete_lwp (leader_lp);
1853 }
1854 });
fa96cb38 1855}
c3adc08c 1856
a1385b7b
SM
1857/* Callback for `find_thread'. Returns the first LWP that is not
1858 stopped. */
d50171e4 1859
a1385b7b
SM
1860static bool
1861not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1862{
a1385b7b
SM
1863 if (!thread->id.matches (filter))
1864 return false;
47c0c975 1865
a1385b7b 1866 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1867
a1385b7b 1868 return !lwp->stopped;
0d62e5e8 1869}
611cb4a5 1870
863d01bd
PA
1871/* Increment LWP's suspend count. */
1872
1873static void
1874lwp_suspended_inc (struct lwp_info *lwp)
1875{
1876 lwp->suspended++;
1877
1878 if (debug_threads && lwp->suspended > 4)
1879 {
1880 struct thread_info *thread = get_lwp_thread (lwp);
1881
1882 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1883 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1884 }
1885}
1886
1887/* Decrement LWP's suspend count. */
1888
1889static void
1890lwp_suspended_decr (struct lwp_info *lwp)
1891{
1892 lwp->suspended--;
1893
1894 if (lwp->suspended < 0)
1895 {
1896 struct thread_info *thread = get_lwp_thread (lwp);
1897
1898 internal_error (__FILE__, __LINE__,
1899 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1900 lwp->suspended);
1901 }
1902}
1903
219f2f23
PA
1904/* This function should only be called if the LWP got a SIGTRAP.
1905
1906 Handle any tracepoint steps or hits. Return true if a tracepoint
1907 event was handled, 0 otherwise. */
1908
1909static int
1910handle_tracepoints (struct lwp_info *lwp)
1911{
1912 struct thread_info *tinfo = get_lwp_thread (lwp);
1913 int tpoint_related_event = 0;
1914
582511be
PA
1915 gdb_assert (lwp->suspended == 0);
1916
7984d532
PA
1917 /* If this tracepoint hit causes a tracing stop, we'll immediately
1918 uninsert tracepoints. To do this, we temporarily pause all
1919 threads, unpatch away, and then unpause threads. We need to make
1920 sure the unpausing doesn't resume LWP too. */
863d01bd 1921 lwp_suspended_inc (lwp);
7984d532 1922
219f2f23
PA
1923 /* And we need to be sure that any all-threads-stopping doesn't try
1924 to move threads out of the jump pads, as it could deadlock the
1925 inferior (LWP could be in the jump pad, maybe even holding the
1926 lock.) */
1927
1928 /* Do any necessary step collect actions. */
1929 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1930
fa593d66
PA
1931 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1932
219f2f23
PA
1933 /* See if we just hit a tracepoint and do its main collect
1934 actions. */
1935 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1936
863d01bd 1937 lwp_suspended_decr (lwp);
7984d532
PA
1938
1939 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1940 gdb_assert (!stabilizing_threads
1941 || (lwp->collecting_fast_tracepoint
1942 != fast_tpoint_collect_result::not_collecting));
7984d532 1943
219f2f23
PA
1944 if (tpoint_related_event)
1945 {
1946 if (debug_threads)
87ce2a04 1947 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1948 return 1;
1949 }
1950
1951 return 0;
1952}
1953
13e567af
TBA
1954fast_tpoint_collect_result
1955linux_process_target::linux_fast_tracepoint_collecting
1956 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1957{
1958 CORE_ADDR thread_area;
d86d4aaf 1959 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1960
fa593d66
PA
1961 /* Get the thread area address. This is used to recognize which
1962 thread is which when tracing with the in-process agent library.
1963 We don't read anything from the address, and treat it as opaque;
1964 it's the address itself that we assume is unique per-thread. */
13e567af 1965 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1966 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1967
1968 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1969}
1970
13e567af
TBA
1971int
1972linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1973{
1974 return -1;
1975}
1976
d16f3f6c
TBA
1977bool
1978linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 1979{
0bfdf32f 1980 struct thread_info *saved_thread;
fa593d66 1981
0bfdf32f
GB
1982 saved_thread = current_thread;
1983 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1984
1985 if ((wstat == NULL
1986 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1987 && supports_fast_tracepoints ()
58b4daa5 1988 && agent_loaded_p ())
fa593d66
PA
1989 {
1990 struct fast_tpoint_collect_status status;
fa593d66
PA
1991
1992 if (debug_threads)
87ce2a04
DE
1993 debug_printf ("Checking whether LWP %ld needs to move out of the "
1994 "jump pad.\n",
0bfdf32f 1995 lwpid_of (current_thread));
fa593d66 1996
229d26fc
SM
1997 fast_tpoint_collect_result r
1998 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
1999
2000 if (wstat == NULL
2001 || (WSTOPSIG (*wstat) != SIGILL
2002 && WSTOPSIG (*wstat) != SIGFPE
2003 && WSTOPSIG (*wstat) != SIGSEGV
2004 && WSTOPSIG (*wstat) != SIGBUS))
2005 {
2006 lwp->collecting_fast_tracepoint = r;
2007
229d26fc 2008 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2009 {
229d26fc
SM
2010 if (r == fast_tpoint_collect_result::before_insn
2011 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2012 {
2013 /* Haven't executed the original instruction yet.
2014 Set breakpoint there, and wait till it's hit,
2015 then single-step until exiting the jump pad. */
2016 lwp->exit_jump_pad_bkpt
2017 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2018 }
2019
2020 if (debug_threads)
87ce2a04
DE
2021 debug_printf ("Checking whether LWP %ld needs to move out of "
2022 "the jump pad...it does\n",
0bfdf32f
GB
2023 lwpid_of (current_thread));
2024 current_thread = saved_thread;
fa593d66 2025
d16f3f6c 2026 return true;
fa593d66
PA
2027 }
2028 }
2029 else
2030 {
2031 /* If we get a synchronous signal while collecting, *and*
2032 while executing the (relocated) original instruction,
2033 reset the PC to point at the tpoint address, before
2034 reporting to GDB. Otherwise, it's an IPA lib bug: just
2035 report the signal to GDB, and pray for the best. */
2036
229d26fc
SM
2037 lwp->collecting_fast_tracepoint
2038 = fast_tpoint_collect_result::not_collecting;
fa593d66 2039
229d26fc 2040 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2041 && (status.adjusted_insn_addr <= lwp->stop_pc
2042 && lwp->stop_pc < status.adjusted_insn_addr_end))
2043 {
2044 siginfo_t info;
2045 struct regcache *regcache;
2046
2047 /* The si_addr on a few signals references the address
2048 of the faulting instruction. Adjust that as
2049 well. */
2050 if ((WSTOPSIG (*wstat) == SIGILL
2051 || WSTOPSIG (*wstat) == SIGFPE
2052 || WSTOPSIG (*wstat) == SIGBUS
2053 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2054 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2055 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2056 /* Final check just to make sure we don't clobber
2057 the siginfo of non-kernel-sent signals. */
2058 && (uintptr_t) info.si_addr == lwp->stop_pc)
2059 {
2060 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2061 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2062 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2063 }
2064
0bfdf32f 2065 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 2066 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
2067 lwp->stop_pc = status.tpoint_addr;
2068
2069 /* Cancel any fast tracepoint lock this thread was
2070 holding. */
2071 force_unlock_trace_buffer ();
2072 }
2073
2074 if (lwp->exit_jump_pad_bkpt != NULL)
2075 {
2076 if (debug_threads)
87ce2a04
DE
2077 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2078 "stopping all threads momentarily.\n");
fa593d66
PA
2079
2080 stop_all_lwps (1, lwp);
fa593d66
PA
2081
2082 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2083 lwp->exit_jump_pad_bkpt = NULL;
2084
2085 unstop_all_lwps (1, lwp);
2086
2087 gdb_assert (lwp->suspended >= 0);
2088 }
2089 }
2090 }
2091
2092 if (debug_threads)
87ce2a04
DE
2093 debug_printf ("Checking whether LWP %ld needs to move out of the "
2094 "jump pad...no\n",
0bfdf32f 2095 lwpid_of (current_thread));
0cccb683 2096
0bfdf32f 2097 current_thread = saved_thread;
d16f3f6c 2098 return false;
fa593d66
PA
2099}
2100
2101/* Enqueue one signal in the "signals to report later when out of the
2102 jump pad" list. */
2103
2104static void
2105enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2106{
d86d4aaf 2107 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2108
2109 if (debug_threads)
87ce2a04 2110 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2111 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2112
2113 if (debug_threads)
2114 {
013e3554 2115 for (const auto &sig : lwp->pending_signals_to_report)
87ce2a04 2116 debug_printf (" Already queued %d\n",
013e3554 2117 sig.signal);
fa593d66 2118
87ce2a04 2119 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2120 }
2121
1a981360
PA
2122 /* Don't enqueue non-RT signals if they are already in the deferred
2123 queue. (SIGSTOP being the easiest signal to see ending up here
2124 twice) */
2125 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2126 {
013e3554 2127 for (const auto &sig : lwp->pending_signals_to_report)
1a981360 2128 {
013e3554 2129 if (sig.signal == WSTOPSIG (*wstat))
1a981360
PA
2130 {
2131 if (debug_threads)
87ce2a04
DE
2132 debug_printf ("Not requeuing already queued non-RT signal %d"
2133 " for LWP %ld\n",
013e3554 2134 sig.signal,
d86d4aaf 2135 lwpid_of (thread));
1a981360
PA
2136 return;
2137 }
2138 }
2139 }
2140
013e3554 2141 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
8d749320 2142
d86d4aaf 2143 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554 2144 &lwp->pending_signals_to_report.back ().info);
fa593d66
PA
2145}
2146
2147/* Dequeue one signal from the "signals to report later when out of
2148 the jump pad" list. */
2149
2150static int
2151dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2152{
d86d4aaf
DE
2153 struct thread_info *thread = get_lwp_thread (lwp);
2154
013e3554 2155 if (!lwp->pending_signals_to_report.empty ())
fa593d66 2156 {
013e3554 2157 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
fa593d66 2158
013e3554
TBA
2159 *wstat = W_STOPCODE (p_sig.signal);
2160 if (p_sig.info.si_signo != 0)
d86d4aaf 2161 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
013e3554
TBA
2162 &p_sig.info);
2163
2164 lwp->pending_signals_to_report.pop_front ();
fa593d66
PA
2165
2166 if (debug_threads)
87ce2a04 2167 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2168 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2169
2170 if (debug_threads)
2171 {
013e3554 2172 for (const auto &sig : lwp->pending_signals_to_report)
87ce2a04 2173 debug_printf (" Still queued %d\n",
013e3554 2174 sig.signal);
fa593d66 2175
87ce2a04 2176 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2177 }
2178
2179 return 1;
2180 }
2181
2182 return 0;
2183}
2184
ac1bbaca
TBA
2185bool
2186linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2187{
ac1bbaca
TBA
2188 struct thread_info *saved_thread = current_thread;
2189 current_thread = get_lwp_thread (child);
d50171e4 2190
ac1bbaca
TBA
2191 if (low_stopped_by_watchpoint ())
2192 {
2193 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2194 child->stopped_data_address = low_stopped_data_address ();
2195 }
582511be 2196
ac1bbaca 2197 current_thread = saved_thread;
582511be 2198
ac1bbaca
TBA
2199 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2200}
d50171e4 2201
ac1bbaca
TBA
2202bool
2203linux_process_target::low_stopped_by_watchpoint ()
2204{
2205 return false;
2206}
d50171e4 2207
ac1bbaca
TBA
2208CORE_ADDR
2209linux_process_target::low_stopped_data_address ()
2210{
2211 return 0;
c4d9ceb6
YQ
2212}
2213
de0d863e
DB
2214/* Return the ptrace options that we want to try to enable. */
2215
2216static int
2217linux_low_ptrace_options (int attached)
2218{
c12a5089 2219 client_state &cs = get_client_state ();
de0d863e
DB
2220 int options = 0;
2221
2222 if (!attached)
2223 options |= PTRACE_O_EXITKILL;
2224
c12a5089 2225 if (cs.report_fork_events)
de0d863e
DB
2226 options |= PTRACE_O_TRACEFORK;
2227
c12a5089 2228 if (cs.report_vfork_events)
c269dbdb
DB
2229 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2230
c12a5089 2231 if (cs.report_exec_events)
94585166
DB
2232 options |= PTRACE_O_TRACEEXEC;
2233
82075af2
JS
2234 options |= PTRACE_O_TRACESYSGOOD;
2235
de0d863e
DB
2236 return options;
2237}
2238
1a48f002 2239void
d16f3f6c 2240linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38 2241{
c12a5089 2242 client_state &cs = get_client_state ();
fa96cb38
PA
2243 struct lwp_info *child;
2244 struct thread_info *thread;
582511be 2245 int have_stop_pc = 0;
fa96cb38 2246
f2907e49 2247 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2248
94585166
DB
2249 /* Check for stop events reported by a process we didn't already
2250 know about - anything not already in our LWP list.
2251
2252 If we're expecting to receive stopped processes after
2253 fork, vfork, and clone events, then we'll just add the
2254 new one to our list and go back to waiting for the event
2255 to be reported - the stopped process might be returned
2256 from waitpid before or after the event is.
2257
2258 But note the case of a non-leader thread exec'ing after the
2259 leader having exited, and gone from our lists (because
2260 check_zombie_leaders deleted it). The non-leader thread
2261 changes its tid to the tgid. */
2262
2263 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2264 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2265 {
2266 ptid_t child_ptid;
2267
2268 /* A multi-thread exec after we had seen the leader exiting. */
2269 if (debug_threads)
2270 {
2271 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2272 "after exec.\n", lwpid);
2273 }
2274
fd79271b 2275 child_ptid = ptid_t (lwpid, lwpid, 0);
94585166
DB
2276 child = add_lwp (child_ptid);
2277 child->stopped = 1;
2278 current_thread = child->thread;
2279 }
2280
fa96cb38
PA
2281 /* If we didn't find a process, one of two things presumably happened:
2282 - A process we started and then detached from has exited. Ignore it.
2283 - A process we are controlling has forked and the new child's stop
2284 was reported to us by the kernel. Save its PID. */
2285 if (child == NULL && WIFSTOPPED (wstat))
2286 {
2287 add_to_pid_list (&stopped_pids, lwpid, wstat);
1a48f002 2288 return;
fa96cb38
PA
2289 }
2290 else if (child == NULL)
1a48f002 2291 return;
fa96cb38
PA
2292
2293 thread = get_lwp_thread (child);
2294
2295 child->stopped = 1;
2296
2297 child->last_status = wstat;
2298
582511be
PA
2299 /* Check if the thread has exited. */
2300 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2301 {
2302 if (debug_threads)
2303 debug_printf ("LLFE: %d exited.\n", lwpid);
f50bf8e5
YQ
2304
2305 if (finish_step_over (child))
2306 {
2307 /* Unsuspend all other LWPs, and set them back running again. */
2308 unsuspend_all_lwps (child);
2309 }
2310
65706a29
PA
2311 /* If there is at least one more LWP, then the exit signal was
2312 not the end of the debugged application and should be
2313 ignored, unless GDB wants to hear about thread exits. */
c12a5089 2314 if (cs.report_thread_events
65706a29 2315 || last_thread_of_process_p (pid_of (thread)))
582511be 2316 {
65706a29
PA
2317 /* Since events are serialized to GDB core, and we can't
2318 report this one right now. Leave the status pending for
2319 the next time we're able to report it. */
2320 mark_lwp_dead (child, wstat);
1a48f002 2321 return;
582511be
PA
2322 }
2323 else
2324 {
65706a29 2325 delete_lwp (child);
1a48f002 2326 return;
582511be
PA
2327 }
2328 }
2329
2330 gdb_assert (WIFSTOPPED (wstat));
2331
fa96cb38
PA
2332 if (WIFSTOPPED (wstat))
2333 {
2334 struct process_info *proc;
2335
c06cbd92 2336 /* Architecture-specific setup after inferior is running. */
fa96cb38 2337 proc = find_process_pid (pid_of (thread));
c06cbd92 2338 if (proc->tdesc == NULL)
fa96cb38 2339 {
c06cbd92
YQ
2340 if (proc->attached)
2341 {
c06cbd92
YQ
2342 /* This needs to happen after we have attached to the
2343 inferior and it is stopped for the first time, but
2344 before we access any inferior registers. */
797bcff5 2345 arch_setup_thread (thread);
c06cbd92
YQ
2346 }
2347 else
2348 {
2349 /* The process is started, but GDBserver will do
2350 architecture-specific setup after the program stops at
2351 the first instruction. */
2352 child->status_pending_p = 1;
2353 child->status_pending = wstat;
1a48f002 2354 return;
c06cbd92 2355 }
fa96cb38
PA
2356 }
2357 }
2358
fa96cb38
PA
2359 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2360 {
beed38b8 2361 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2362 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2363
de0d863e 2364 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2365 child->must_set_ptrace_flags = 0;
2366 }
2367
82075af2
JS
2368 /* Always update syscall_state, even if it will be filtered later. */
2369 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2370 {
2371 child->syscall_state
2372 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2373 ? TARGET_WAITKIND_SYSCALL_RETURN
2374 : TARGET_WAITKIND_SYSCALL_ENTRY);
2375 }
2376 else
2377 {
2378 /* Almost all other ptrace-stops are known to be outside of system
2379 calls, with further exceptions in handle_extended_wait. */
2380 child->syscall_state = TARGET_WAITKIND_IGNORE;
2381 }
2382
e7ad2f14
PA
2383 /* Be careful to not overwrite stop_pc until save_stop_reason is
2384 called. */
fa96cb38 2385 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2386 && linux_is_extended_waitstatus (wstat))
fa96cb38 2387 {
582511be 2388 child->stop_pc = get_pc (child);
94585166 2389 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2390 {
2391 /* The event has been handled, so just return without
2392 reporting it. */
1a48f002 2393 return;
de0d863e 2394 }
fa96cb38
PA
2395 }
2396
80aea927 2397 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2398 {
e7ad2f14 2399 if (save_stop_reason (child))
582511be
PA
2400 have_stop_pc = 1;
2401 }
2402
2403 if (!have_stop_pc)
2404 child->stop_pc = get_pc (child);
2405
fa96cb38
PA
2406 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2407 && child->stop_expected)
2408 {
2409 if (debug_threads)
2410 debug_printf ("Expected stop.\n");
2411 child->stop_expected = 0;
2412
2413 if (thread->last_resume_kind == resume_stop)
2414 {
2415 /* We want to report the stop to the core. Treat the
2416 SIGSTOP as a normal event. */
2bf6fb9d
PA
2417 if (debug_threads)
2418 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2419 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2420 }
2421 else if (stopping_threads != NOT_STOPPING_THREADS)
2422 {
2423 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2424 pending. */
2bf6fb9d
PA
2425 if (debug_threads)
2426 debug_printf ("LLW: SIGSTOP caught for %s "
2427 "while stopping threads.\n",
2428 target_pid_to_str (ptid_of (thread)));
1a48f002 2429 return;
fa96cb38
PA
2430 }
2431 else
2432 {
2bf6fb9d
PA
2433 /* This is a delayed SIGSTOP. Filter out the event. */
2434 if (debug_threads)
2435 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2436 child->stepping ? "step" : "continue",
2437 target_pid_to_str (ptid_of (thread)));
2438
df95181f 2439 resume_one_lwp (child, child->stepping, 0, NULL);
1a48f002 2440 return;
fa96cb38
PA
2441 }
2442 }
2443
582511be
PA
2444 child->status_pending_p = 1;
2445 child->status_pending = wstat;
1a48f002 2446 return;
fa96cb38
PA
2447}
2448
b31cdfa6
TBA
2449bool
2450linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2451{
b31cdfa6
TBA
2452 if (supports_hardware_single_step ())
2453 return true;
f79b145d
YQ
2454 else
2455 {
3b9a79ef 2456 /* GDBserver must insert single-step breakpoint for software
f79b145d 2457 single step. */
3b9a79ef 2458 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2459 return false;
f79b145d
YQ
2460 }
2461}
2462
df95181f
TBA
2463void
2464linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2465{
20ba1ce6
PA
2466 struct lwp_info *lp = get_thread_lwp (thread);
2467
2468 if (lp->stopped
863d01bd 2469 && !lp->suspended
20ba1ce6 2470 && !lp->status_pending_p
20ba1ce6
PA
2471 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2472 {
8901d193
YQ
2473 int step = 0;
2474
2475 if (thread->last_resume_kind == resume_step)
2476 step = maybe_hw_step (thread);
20ba1ce6
PA
2477
2478 if (debug_threads)
2479 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2480 target_pid_to_str (ptid_of (thread)),
2481 paddress (lp->stop_pc),
2482 step);
2483
df95181f 2484 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2485 }
2486}
2487
d16f3f6c
TBA
2488int
2489linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2490 ptid_t filter_ptid,
2491 int *wstatp, int options)
0d62e5e8 2492{
d86d4aaf 2493 struct thread_info *event_thread;
d50171e4 2494 struct lwp_info *event_child, *requested_child;
fa96cb38 2495 sigset_t block_mask, prev_mask;
d50171e4 2496
fa96cb38 2497 retry:
d86d4aaf
DE
2498 /* N.B. event_thread points to the thread_info struct that contains
2499 event_child. Keep them in sync. */
2500 event_thread = NULL;
d50171e4
PA
2501 event_child = NULL;
2502 requested_child = NULL;
0d62e5e8 2503
95954743 2504 /* Check for a lwp with a pending status. */
bd99dc85 2505
d7e15655 2506 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2507 {
83e1b6c1
SM
2508 event_thread = find_thread_in_random ([&] (thread_info *thread)
2509 {
2510 return status_pending_p_callback (thread, filter_ptid);
2511 });
2512
d86d4aaf
DE
2513 if (event_thread != NULL)
2514 event_child = get_thread_lwp (event_thread);
2515 if (debug_threads && event_thread)
2516 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2517 }
d7e15655 2518 else if (filter_ptid != null_ptid)
0d62e5e8 2519 {
fa96cb38 2520 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2521
bde24c0a 2522 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2523 && requested_child->status_pending_p
229d26fc
SM
2524 && (requested_child->collecting_fast_tracepoint
2525 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2526 {
2527 enqueue_one_deferred_signal (requested_child,
2528 &requested_child->status_pending);
2529 requested_child->status_pending_p = 0;
2530 requested_child->status_pending = 0;
df95181f 2531 resume_one_lwp (requested_child, 0, 0, NULL);
fa593d66
PA
2532 }
2533
2534 if (requested_child->suspended
2535 && requested_child->status_pending_p)
38e08fca
GB
2536 {
2537 internal_error (__FILE__, __LINE__,
2538 "requesting an event out of a"
2539 " suspended child?");
2540 }
fa593d66 2541
d50171e4 2542 if (requested_child->status_pending_p)
d86d4aaf
DE
2543 {
2544 event_child = requested_child;
2545 event_thread = get_lwp_thread (event_child);
2546 }
0d62e5e8 2547 }
611cb4a5 2548
0d62e5e8
DJ
2549 if (event_child != NULL)
2550 {
bd99dc85 2551 if (debug_threads)
87ce2a04 2552 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2553 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2554 *wstatp = event_child->status_pending;
bd99dc85
PA
2555 event_child->status_pending_p = 0;
2556 event_child->status_pending = 0;
0bfdf32f 2557 current_thread = event_thread;
d86d4aaf 2558 return lwpid_of (event_thread);
0d62e5e8
DJ
2559 }
2560
fa96cb38
PA
2561 /* But if we don't find a pending event, we'll have to wait.
2562
2563 We only enter this loop if no process has a pending wait status.
2564 Thus any action taken in response to a wait status inside this
2565 loop is responding as soon as we detect the status, not after any
2566 pending events. */
d8301ad1 2567
fa96cb38
PA
2568 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2569 all signals while here. */
2570 sigfillset (&block_mask);
21987b9c 2571 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2572
582511be
PA
2573 /* Always pull all events out of the kernel. We'll randomly select
2574 an event LWP out of all that have events, to prevent
2575 starvation. */
fa96cb38 2576 while (event_child == NULL)
0d62e5e8 2577 {
fa96cb38 2578 pid_t ret = 0;
0d62e5e8 2579
fa96cb38
PA
2580 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2581 quirks:
0d62e5e8 2582
fa96cb38
PA
2583 - If the thread group leader exits while other threads in the
2584 thread group still exist, waitpid(TGID, ...) hangs. That
2585 waitpid won't return an exit status until the other threads
2586 in the group are reaped.
611cb4a5 2587
fa96cb38
PA
2588 - When a non-leader thread execs, that thread just vanishes
2589 without reporting an exit (so we'd hang if we waited for it
2590 explicitly in that case). The exec event is reported to
94585166 2591 the TGID pid. */
fa96cb38
PA
2592 errno = 0;
2593 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2594
fa96cb38
PA
2595 if (debug_threads)
2596 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
6d91ce9a 2597 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2598
fa96cb38 2599 if (ret > 0)
0d62e5e8 2600 {
89be2091 2601 if (debug_threads)
bd99dc85 2602 {
fa96cb38 2603 debug_printf ("LLW: waitpid %ld received %s\n",
8d06918f 2604 (long) ret, status_to_str (*wstatp).c_str ());
bd99dc85 2605 }
89be2091 2606
582511be
PA
2607 /* Filter all events. IOW, leave all events pending. We'll
2608 randomly select an event LWP out of all that have events
2609 below. */
d16f3f6c 2610 filter_event (ret, *wstatp);
fa96cb38
PA
2611 /* Retry until nothing comes out of waitpid. A single
2612 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2613 continue;
2614 }
2615
20ba1ce6
PA
2616 /* Now that we've pulled all events out of the kernel, resume
2617 LWPs that don't have an interesting event to report. */
2618 if (stopping_threads == NOT_STOPPING_THREADS)
df95181f
TBA
2619 for_each_thread ([this] (thread_info *thread)
2620 {
2621 resume_stopped_resumed_lwps (thread);
2622 });
20ba1ce6
PA
2623
2624 /* ... and find an LWP with a status to report to the core, if
2625 any. */
83e1b6c1
SM
2626 event_thread = find_thread_in_random ([&] (thread_info *thread)
2627 {
2628 return status_pending_p_callback (thread, filter_ptid);
2629 });
2630
582511be
PA
2631 if (event_thread != NULL)
2632 {
2633 event_child = get_thread_lwp (event_thread);
2634 *wstatp = event_child->status_pending;
2635 event_child->status_pending_p = 0;
2636 event_child->status_pending = 0;
2637 break;
2638 }
2639
fa96cb38
PA
2640 /* Check for zombie thread group leaders. Those can't be reaped
2641 until all other threads in the thread group are. */
2642 check_zombie_leaders ();
2643
a1385b7b
SM
2644 auto not_stopped = [&] (thread_info *thread)
2645 {
2646 return not_stopped_callback (thread, wait_ptid);
2647 };
2648
fa96cb38
PA
2649 /* If there are no resumed children left in the set of LWPs we
2650 want to wait for, bail. We can't just block in
2651 waitpid/sigsuspend, because lwps might have been left stopped
2652 in trace-stop state, and we'd be stuck forever waiting for
2653 their status to change (which would only happen if we resumed
2654 them). Even if WNOHANG is set, this return code is preferred
2655 over 0 (below), as it is more detailed. */
a1385b7b 2656 if (find_thread (not_stopped) == NULL)
a6dbe5df 2657 {
fa96cb38
PA
2658 if (debug_threads)
2659 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
21987b9c 2660 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2661 return -1;
a6dbe5df
PA
2662 }
2663
fa96cb38
PA
2664 /* No interesting event to report to the caller. */
2665 if ((options & WNOHANG))
24a09b5f 2666 {
fa96cb38
PA
2667 if (debug_threads)
2668 debug_printf ("WNOHANG set, no event found\n");
2669
21987b9c 2670 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2671 return 0;
24a09b5f
DJ
2672 }
2673
fa96cb38
PA
2674 /* Block until we get an event reported with SIGCHLD. */
2675 if (debug_threads)
2676 debug_printf ("sigsuspend'ing\n");
d50171e4 2677
fa96cb38 2678 sigsuspend (&prev_mask);
21987b9c 2679 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2680 goto retry;
2681 }
d50171e4 2682
21987b9c 2683 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2684
0bfdf32f 2685 current_thread = event_thread;
d50171e4 2686
fa96cb38
PA
2687 return lwpid_of (event_thread);
2688}
2689
d16f3f6c
TBA
2690int
2691linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2692{
d16f3f6c 2693 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2694}
2695
6bf5e0ba
PA
2696/* Select one LWP out of those that have events pending. */
2697
2698static void
2699select_event_lwp (struct lwp_info **orig_lp)
2700{
582511be
PA
2701 struct thread_info *event_thread = NULL;
2702
2703 /* In all-stop, give preference to the LWP that is being
2704 single-stepped. There will be at most one, and it's the LWP that
2705 the core is most interested in. If we didn't do this, then we'd
2706 have to handle pending step SIGTRAPs somehow in case the core
2707 later continues the previously-stepped thread, otherwise we'd
2708 report the pending SIGTRAP, and the core, not having stepped the
2709 thread, wouldn't understand what the trap was for, and therefore
2710 would report it to the user as a random signal. */
2711 if (!non_stop)
6bf5e0ba 2712 {
39a64da5
SM
2713 event_thread = find_thread ([] (thread_info *thread)
2714 {
2715 lwp_info *lp = get_thread_lwp (thread);
2716
2717 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2718 && thread->last_resume_kind == resume_step
2719 && lp->status_pending_p);
2720 });
2721
582511be
PA
2722 if (event_thread != NULL)
2723 {
2724 if (debug_threads)
2725 debug_printf ("SEL: Select single-step %s\n",
2726 target_pid_to_str (ptid_of (event_thread)));
2727 }
6bf5e0ba 2728 }
582511be 2729 if (event_thread == NULL)
6bf5e0ba
PA
2730 {
2731 /* No single-stepping LWP. Select one at random, out of those
dda83cd7 2732 which have had events. */
6bf5e0ba 2733
b0319eaa 2734 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2735 {
2736 lwp_info *lp = get_thread_lwp (thread);
2737
b0319eaa
TT
2738 /* Only resumed LWPs that have an event pending. */
2739 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2740 && lp->status_pending_p);
39a64da5 2741 });
6bf5e0ba
PA
2742 }
2743
d86d4aaf 2744 if (event_thread != NULL)
6bf5e0ba 2745 {
d86d4aaf
DE
2746 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2747
6bf5e0ba
PA
2748 /* Switch the event LWP. */
2749 *orig_lp = event_lp;
2750 }
2751}
2752
7984d532
PA
2753/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2754 NULL. */
2755
2756static void
2757unsuspend_all_lwps (struct lwp_info *except)
2758{
139720c5
SM
2759 for_each_thread ([&] (thread_info *thread)
2760 {
2761 lwp_info *lwp = get_thread_lwp (thread);
2762
2763 if (lwp != except)
2764 lwp_suspended_decr (lwp);
2765 });
7984d532
PA
2766}
2767
5a6b0a41 2768static bool lwp_running (thread_info *thread);
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is stuck in a jump pad (e.g. blocked on a lock
     taken inside the pad), we cannot stabilize at all -- bail out and
     leave things as they are.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
			      {
				return stuck_in_jump_pad (thread);
			      });

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below clobbers current_thread; remember it so we can
     restore it before returning.  */
  thread_info *saved_thread = current_thread;

  /* Flag checked by wait_1 so that stops that happen while
     stabilizing are absorbed here rather than reported to GDB.  */
  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
		   {
		     move_out_of_jump_pad (thread);
		   });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  (Bump the suspend count so the LWP stays put
	     until the matching unsuspend_all_lwps below.)  */
	  lwp_suspended_inc (lwp);

	  /* Stops that carry a real signal (or explicit stop
	     requests) must not be lost -- re-queue them as deferred
	     signals to be reported once stabilization is done.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the per-stop suspensions taken in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  /* Sanity check: by now no thread should remain stuck in a jump
     pad; report if one somehow is.  */
  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
				  {
				    return stuck_in_jump_pad (thread);
				  });

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2872
582511be
PA
2873/* Convenience function that is called when the kernel reports an
2874 event that is not passed out to GDB. */
2875
2876static ptid_t
2877ignore_event (struct target_waitstatus *ourstatus)
2878{
2879 /* If we got an event, there may still be others, as a single
2880 SIGCHLD can indicate more than one child stopped. This forces
2881 another target_wait call. */
2882 async_file_mark ();
2883
2884 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2885 return null_ptid;
2886}
2887
fd000fb3
TBA
2888ptid_t
2889linux_process_target::filter_exit_event (lwp_info *event_child,
2890 target_waitstatus *ourstatus)
65706a29 2891{
c12a5089 2892 client_state &cs = get_client_state ();
65706a29
PA
2893 struct thread_info *thread = get_lwp_thread (event_child);
2894 ptid_t ptid = ptid_of (thread);
2895
2896 if (!last_thread_of_process_p (pid_of (thread)))
2897 {
c12a5089 2898 if (cs.report_thread_events)
65706a29
PA
2899 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2900 else
2901 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2902
2903 delete_lwp (event_child);
2904 }
2905 return ptid;
2906}
2907
82075af2
JS
2908/* Returns 1 if GDB is interested in any event_child syscalls. */
2909
2910static int
2911gdb_catching_syscalls_p (struct lwp_info *event_child)
2912{
2913 struct thread_info *thread = get_lwp_thread (event_child);
2914 struct process_info *proc = get_thread_process (thread);
2915
f27866ba 2916 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2917}
2918
9eedd27d
TBA
2919bool
2920linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2921{
4cc32bec 2922 int sysno;
82075af2
JS
2923 struct thread_info *thread = get_lwp_thread (event_child);
2924 struct process_info *proc = get_thread_process (thread);
2925
f27866ba 2926 if (proc->syscalls_to_catch.empty ())
9eedd27d 2927 return false;
82075af2 2928
f27866ba 2929 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2930 return true;
82075af2 2931
4cc32bec 2932 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2933
2934 for (int iter : proc->syscalls_to_catch)
82075af2 2935 if (iter == sysno)
9eedd27d 2936 return true;
82075af2 2937
9eedd27d 2938 return false;
82075af2
JS
2939}
2940
/* The core of the Linux target's wait: wait for an event from the
   inferior(s) matching PTID, decide whether it should be reported to
   GDB or handled internally (internal breakpoints, fast-tracepoint
   jump pads, ignored signals, step-over bookkeeping), and fill
   *OURSTATUS accordingly.  Returns the ptid of the thread the event
   belongs to, or null_ptid when nothing is reported.
   TARGET_OPTIONS may contain TARGET_WNOHANG for a non-blocking
   poll.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  Either a thread with a pending
     status, or one that is not stopped, counts as resumed.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is in progress, we must block waiting for the
     stepping thread specifically, hence the ~WNOHANG below.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      /* Nothing happened (only possible in non-blocking mode).  */
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      /* No resumed children remain to wait for.  */
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  /* Past the breakpoint now -- clear the stop reason unless
	     the new PC happens to sit on another breakpoint.  */
	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  /* stabilize_threads is driving us: report a plain
		     stop so it can account for this thread.  */
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we'll be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3674/* Get rid of any pending event in the pipe. */
3675static void
3676async_file_flush (void)
3677{
3678 int ret;
3679 char buf;
3680
3681 do
3682 ret = read (linux_event_pipe[0], &buf, 1);
3683 while (ret >= 0 || (ret == -1 && errno == EINTR));
3684}
3685
3686/* Put something in the pipe, so the event loop wakes up. */
3687static void
3688async_file_mark (void)
3689{
3690 int ret;
3691
3692 async_file_flush ();
3693
3694 do
3695 ret = write (linux_event_pipe[1], "+", 1);
3696 while (ret == 0 || (ret == -1 && errno == EINTR));
3697
3698 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3699 be awakened anyway. */
3700}
3701
6532e7e3
TBA
3702ptid_t
3703linux_process_target::wait (ptid_t ptid,
3704 target_waitstatus *ourstatus,
b60cea74 3705 target_wait_flags target_options)
bd99dc85 3706{
95954743 3707 ptid_t event_ptid;
bd99dc85 3708
bd99dc85
PA
3709 /* Flush the async file first. */
3710 if (target_is_async_p ())
3711 async_file_flush ();
3712
582511be
PA
3713 do
3714 {
d16f3f6c 3715 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3716 }
3717 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3718 && event_ptid == null_ptid
582511be 3719 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3720
3721 /* If at least one stop was reported, there may be more. A single
3722 SIGCHLD can signal more than one child stop. */
3723 if (target_is_async_p ()
3724 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3725 && event_ptid != null_ptid)
bd99dc85
PA
3726 async_file_mark ();
3727
3728 return event_ptid;
da6d8c04
DJ
3729}
3730
c5f62d5f 3731/* Send a signal to an LWP. */
fd500816
DJ
3732
3733static int
a1928bad 3734kill_lwp (unsigned long lwpid, int signo)
fd500816 3735{
4a6ed09b 3736 int ret;
fd500816 3737
4a6ed09b
PA
3738 errno = 0;
3739 ret = syscall (__NR_tkill, lwpid, signo);
3740 if (errno == ENOSYS)
3741 {
3742 /* If tkill fails, then we are not using nptl threads, a
3743 configuration we no longer support. */
3744 perror_with_name (("tkill"));
3745 }
3746 return ret;
fd500816
DJ
3747}
3748
/* Exported hook (see nat/linux-nat.h): request that LWP stop.  Thin
   wrapper around send_sigstop, which sends the LWP a SIGSTOP unless
   one is already pending.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3754
0d62e5e8 3755static void
02fc4de7 3756send_sigstop (struct lwp_info *lwp)
0d62e5e8 3757{
bd99dc85 3758 int pid;
0d62e5e8 3759
d86d4aaf 3760 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3761
0d62e5e8
DJ
3762 /* If we already have a pending stop signal for this process, don't
3763 send another. */
54a0b537 3764 if (lwp->stop_expected)
0d62e5e8 3765 {
ae13219e 3766 if (debug_threads)
87ce2a04 3767 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3768
0d62e5e8
DJ
3769 return;
3770 }
3771
3772 if (debug_threads)
87ce2a04 3773 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3774
d50171e4 3775 lwp->stop_expected = 1;
bd99dc85 3776 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3777}
3778
df3e4dbe
SM
3779static void
3780send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3781{
d86d4aaf 3782 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3783
7984d532
PA
3784 /* Ignore EXCEPT. */
3785 if (lwp == except)
df3e4dbe 3786 return;
7984d532 3787
02fc4de7 3788 if (lwp->stopped)
df3e4dbe 3789 return;
02fc4de7
PA
3790
3791 send_sigstop (lwp);
7984d532
PA
3792}
3793
3794/* Increment the suspend count of an LWP, and stop it, if not stopped
3795 yet. */
df3e4dbe
SM
3796static void
3797suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3798{
d86d4aaf 3799 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3800
3801 /* Ignore EXCEPT. */
3802 if (lwp == except)
df3e4dbe 3803 return;
7984d532 3804
863d01bd 3805 lwp_suspended_inc (lwp);
7984d532 3806
df3e4dbe 3807 send_sigstop (thread, except);
02fc4de7
PA
3808}
3809
/* Record in LWP that it has exited, or been killed by a signal, with
   raw wait status WSTAT, so the event can be reported to GDB later.
   Also flips the bookkeeping flags so no further stop attempts or
   stop expectations apply to this dead LWP.  */

static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      /* Normal exit: record the exit code.  */
      lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
      lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
    }
  else if (WIFSIGNALED (wstat))
    {
      /* Killed by a signal: record the (host->gdb translated) signal.  */
      lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
      lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
    }

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}
3836
00db26fa
PA
3837/* Return true if LWP has exited already, and has a pending exit event
3838 to report to GDB. */
3839
3840static int
3841lwp_is_marked_dead (struct lwp_info *lwp)
3842{
3843 return (lwp->status_pending_p
3844 && (WIFEXITED (lwp->status_pending)
3845 || WIFSIGNALED (lwp->status_pending)));
3846}
3847
d16f3f6c
TBA
3848void
3849linux_process_target::wait_for_sigstop ()
0d62e5e8 3850{
0bfdf32f 3851 struct thread_info *saved_thread;
95954743 3852 ptid_t saved_tid;
fa96cb38
PA
3853 int wstat;
3854 int ret;
0d62e5e8 3855
0bfdf32f
GB
3856 saved_thread = current_thread;
3857 if (saved_thread != NULL)
9c80ecd6 3858 saved_tid = saved_thread->id;
bd99dc85 3859 else
95954743 3860 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3861
d50171e4 3862 if (debug_threads)
fa96cb38 3863 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3864
fa96cb38
PA
3865 /* Passing NULL_PTID as filter indicates we want all events to be
3866 left pending. Eventually this returns when there are no
3867 unwaited-for children left. */
d16f3f6c 3868 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3869 gdb_assert (ret == -1);
0d62e5e8 3870
13d3d99b 3871 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 3872 current_thread = saved_thread;
0d62e5e8
DJ
3873 else
3874 {
3875 if (debug_threads)
87ce2a04 3876 debug_printf ("Previously current thread died.\n");
0d62e5e8 3877
f0db101d
PA
3878 /* We can't change the current inferior behind GDB's back,
3879 otherwise, a subsequent command may apply to the wrong
3880 process. */
3881 current_thread = NULL;
0d62e5e8
DJ
3882 }
3883}
3884
13e567af
TBA
3885bool
3886linux_process_target::stuck_in_jump_pad (thread_info *thread)
fa593d66 3887{
d86d4aaf 3888 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3889
863d01bd
PA
3890 if (lwp->suspended != 0)
3891 {
3892 internal_error (__FILE__, __LINE__,
3893 "LWP %ld is suspended, suspended=%d\n",
3894 lwpid_of (thread), lwp->suspended);
3895 }
fa593d66
PA
3896 gdb_assert (lwp->stopped);
3897
3898 /* Allow debugging the jump pad, gdb_collect, etc.. */
3899 return (supports_fast_tracepoints ()
58b4daa5 3900 && agent_loaded_p ()
fa593d66 3901 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3902 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3903 || thread->last_resume_kind == resume_step)
229d26fc
SM
3904 && (linux_fast_tracepoint_collecting (lwp, NULL)
3905 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3906}
3907
/* If THREAD is stopped inside a fast tracepoint jump pad at an
   uninteresting spot, resume it so it can finish the collect and get
   out; any pending signal is deferred for later delivery.  Otherwise
   suspend the LWP where it is.  Called with all other threads
   stopped.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Pointer to the pending status, if any, so it can be deferred.  */
  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Transfer the pending signal to the deferred queue; it
	     will be re-reported once the LWP is out of the pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      /* Let it run until it exits the jump pad.  */
      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    /* Not in (or allowed to stay in) a jump pad: keep it stopped.  */
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
3957
5a6b0a41
SM
3958static bool
3959lwp_running (thread_info *thread)
fa593d66 3960{
d86d4aaf 3961 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3962
00db26fa 3963 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
3964 return false;
3965
3966 return !lwp->stopped;
fa593d66
PA
3967}
3968
d16f3f6c
TBA
3969void
3970linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 3971{
bde24c0a
PA
3972 /* Should not be called recursively. */
3973 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3974
87ce2a04
DE
3975 if (debug_threads)
3976 {
3977 debug_enter ();
3978 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3979 suspend ? "stop-and-suspend" : "stop",
3980 except != NULL
d86d4aaf 3981 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3982 : "none");
3983 }
3984
bde24c0a
PA
3985 stopping_threads = (suspend
3986 ? STOPPING_AND_SUSPENDING_THREADS
3987 : STOPPING_THREADS);
7984d532
PA
3988
3989 if (suspend)
df3e4dbe
SM
3990 for_each_thread ([&] (thread_info *thread)
3991 {
3992 suspend_and_send_sigstop (thread, except);
3993 });
7984d532 3994 else
df3e4dbe
SM
3995 for_each_thread ([&] (thread_info *thread)
3996 {
3997 send_sigstop (thread, except);
3998 });
3999
fa96cb38 4000 wait_for_sigstop ();
bde24c0a 4001 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4002
4003 if (debug_threads)
4004 {
4005 debug_printf ("stop_all_lwps done, setting stopping_threads "
4006 "back to !stopping\n");
4007 debug_exit ();
4008 }
0d62e5e8
DJ
4009}
4010
863d01bd
PA
4011/* Enqueue one signal in the chain of signals which need to be
4012 delivered to this process on next resume. */
4013
4014static void
4015enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4016{
013e3554
TBA
4017 lwp->pending_signals.emplace_back (signal);
4018 if (info == nullptr)
4019 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
863d01bd 4020 else
013e3554 4021 lwp->pending_signals.back ().info = *info;
863d01bd
PA
4022}
4023
df95181f
TBA
4024void
4025linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4026{
984a2c04
YQ
4027 struct thread_info *thread = get_lwp_thread (lwp);
4028 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4029
4030 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4031
984a2c04 4032 current_thread = thread;
7582c77c 4033 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 4034
a0ff9e1a 4035 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4036 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4037}
4038
df95181f
TBA
4039int
4040linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4041{
4042 int step = 0;
4043
b31cdfa6 4044 if (supports_hardware_single_step ())
7fe5e27e
AT
4045 {
4046 step = 1;
4047 }
7582c77c 4048 else if (supports_software_single_step ())
7fe5e27e
AT
4049 {
4050 install_software_single_step_breakpoints (lwp);
4051 step = 0;
4052 }
4053 else
4054 {
4055 if (debug_threads)
4056 debug_printf ("stepping is not implemented on this target");
4057 }
4058
4059 return step;
4060}
4061
35ac8b3e 4062/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4063 finish a fast tracepoint collect. Since signal can be delivered in
4064 the step-over, the program may go to signal handler and trap again
4065 after return from the signal handler. We can live with the spurious
4066 double traps. */
35ac8b3e
YQ
4067
4068static int
4069lwp_signal_can_be_delivered (struct lwp_info *lwp)
4070{
229d26fc
SM
4071 return (lwp->collecting_fast_tracepoint
4072 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4073}
4074
/* Resume LWP, single-stepping it if STEP is nonzero, and delivering
   SIGNAL (with optional siginfo INFO) if one can be delivered now;
   otherwise the signal is queued for a later resume.  Throws on
   ptrace failure -- see resume_one_lwp for the wrapper that tolerates
   the LWP having silently died.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Already running; nothing to do.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status must be reported before the LWP may run again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, for later comparison.  Only
     possible once the tdesc (and thus registers) are available.  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      /* Only restore siginfo if one was actually recorded.  */
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  /* Give the low target a chance to flush per-thread state (e.g.
     debug registers) before the LWP runs.  */
  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Pick the ptrace request: single-step, syscall-stop tracing, or a
     plain continue.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4274
/* Default implementation of the low-target hook called right before
   an LWP is resumed.  Architectures that keep per-thread state the
   kernel must see before resuming (e.g. debug registers) override
   this.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4280
23f238d3
PA
4281/* Called when we try to resume a stopped LWP and that errors out. If
4282 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4283 or about to become), discard the error, clear any pending status
4284 the LWP may have, and return true (we'll collect the exit status
4285 soon enough). Otherwise, return false. */
4286
4287static int
4288check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4289{
4290 struct thread_info *thread = get_lwp_thread (lp);
4291
4292 /* If we get an error after resuming the LWP successfully, we'd
4293 confuse !T state for the LWP being gone. */
4294 gdb_assert (lp->stopped);
4295
4296 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4297 because even if ptrace failed with ESRCH, the tracee may be "not
4298 yet fully dead", but already refusing ptrace requests. In that
4299 case the tracee has 'R (Running)' state for a little bit
4300 (observed in Linux 3.18). See also the note on ESRCH in the
4301 ptrace(2) man page. Instead, check whether the LWP has any state
4302 other than ptrace-stopped. */
4303
4304 /* Don't assume anything if /proc/PID/status can't be read. */
4305 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4306 {
23f238d3
PA
4307 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4308 lp->status_pending_p = 0;
4309 return 1;
4310 }
4311 return 0;
4312}
4313
df95181f
TBA
4314void
4315linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4316 siginfo_t *info)
23f238d3 4317{
a70b8144 4318 try
23f238d3 4319 {
df95181f 4320 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4321 }
230d2906 4322 catch (const gdb_exception_error &ex)
23f238d3
PA
4323 {
4324 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4325 throw;
3221518c 4326 }
da6d8c04
DJ
4327}
4328
5fdda392
SM
4329/* This function is called once per thread via for_each_thread.
4330 We look up which resume request applies to THREAD and mark it with a
4331 pointer to the appropriate resume request.
5544ad89
DJ
4332
4333 This algorithm is O(threads * resume elements), but resume elements
4334 is small (and will remain small at least until GDB supports thread
4335 suspension). */
ebcf782c 4336
5fdda392
SM
4337static void
4338linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4339{
d86d4aaf 4340 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4341
5fdda392 4342 for (int ndx = 0; ndx < n; ndx++)
95954743 4343 {
5fdda392 4344 ptid_t ptid = resume[ndx].thread;
d7e15655 4345 if (ptid == minus_one_ptid
9c80ecd6 4346 || ptid == thread->id
0c9070b3
YQ
4347 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4348 of PID'. */
e99b03dc 4349 || (ptid.pid () == pid_of (thread)
0e998d96 4350 && (ptid.is_pid ()
e38504b3 4351 || ptid.lwp () == -1)))
95954743 4352 {
5fdda392 4353 if (resume[ndx].kind == resume_stop
8336d594 4354 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4355 {
4356 if (debug_threads)
87ce2a04
DE
4357 debug_printf ("already %s LWP %ld at GDB's request\n",
4358 (thread->last_status.kind
4359 == TARGET_WAITKIND_STOPPED)
4360 ? "stopped"
4361 : "stopping",
d86d4aaf 4362 lwpid_of (thread));
d50171e4
PA
4363
4364 continue;
4365 }
4366
5a04c4cf
PA
4367 /* Ignore (wildcard) resume requests for already-resumed
4368 threads. */
5fdda392 4369 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4370 && thread->last_resume_kind != resume_stop)
4371 {
4372 if (debug_threads)
4373 debug_printf ("already %s LWP %ld at GDB's request\n",
4374 (thread->last_resume_kind
4375 == resume_step)
4376 ? "stepping"
4377 : "continuing",
4378 lwpid_of (thread));
4379 continue;
4380 }
4381
4382 /* Don't let wildcard resumes resume fork children that GDB
4383 does not yet know are new fork children. */
4384 if (lwp->fork_relative != NULL)
4385 {
5a04c4cf
PA
4386 struct lwp_info *rel = lwp->fork_relative;
4387
4388 if (rel->status_pending_p
4389 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4390 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4391 {
4392 if (debug_threads)
4393 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4394 lwpid_of (thread));
4395 continue;
4396 }
4397 }
4398
4399 /* If the thread has a pending event that has already been
4400 reported to GDBserver core, but GDB has not pulled the
4401 event out of the vStopped queue yet, likewise, ignore the
4402 (wildcard) resume request. */
9c80ecd6 4403 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4404 {
4405 if (debug_threads)
4406 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4407 lwpid_of (thread));
4408 continue;
4409 }
4410
5fdda392 4411 lwp->resume = &resume[ndx];
8336d594 4412 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4413
c2d6af84
PA
4414 lwp->step_range_start = lwp->resume->step_range_start;
4415 lwp->step_range_end = lwp->resume->step_range_end;
4416
fa593d66
PA
4417 /* If we had a deferred signal to report, dequeue one now.
4418 This can happen if LWP gets more than one signal while
4419 trying to get out of a jump pad. */
4420 if (lwp->stopped
4421 && !lwp->status_pending_p
4422 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4423 {
4424 lwp->status_pending_p = 1;
4425
4426 if (debug_threads)
87ce2a04
DE
4427 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4428 "leaving status pending.\n",
d86d4aaf
DE
4429 WSTOPSIG (lwp->status_pending),
4430 lwpid_of (thread));
fa593d66
PA
4431 }
4432
5fdda392 4433 return;
95954743
PA
4434 }
4435 }
2bd7c093
PA
4436
4437 /* No resume action for this thread. */
4438 lwp->resume = NULL;
5544ad89
DJ
4439}
4440
df95181f
TBA
4441bool
4442linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4443{
d86d4aaf 4444 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4445
bd99dc85
PA
4446 /* LWPs which will not be resumed are not interesting, because
4447 we might not wait for them next time through linux_wait. */
2bd7c093 4448 if (lwp->resume == NULL)
25c28b4d 4449 return false;
64386c31 4450
df95181f 4451 return thread_still_has_status_pending (thread);
d50171e4
PA
4452}
4453
/* Return true if THREAD is stopped at a breakpoint (or fast
   tracepoint jump) that must be stepped over before it can be
   resumed, i.e. the breakpoint would otherwise be re-hit immediately.
   Various conditions disqualify a step-over (see the early returns
   below).  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  /* A thread GDB wants stopped should stay where it is.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  /* A pending status must be reported before any resume.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* breakpoint_here / gdb_breakpoint_here operate on the current
     thread; switch temporarily.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4584
/* Begin a step-over operation for LWP: stop (and suspend) all other
   threads, remove the breakpoint at the stop PC, single-step LWP past
   it, and record in step_over_bkpt that we're waiting for this LWP's
   next event.  finish_step_over completes the operation.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Suspend every LWP except the one being stepped over.  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* uninsert_breakpoints_at et al. operate on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert the breakpoint once we're past it.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4631
/* Finish a step-over started by start_step_over: reinsert the
   breakpoint that was stepped over, drop any single-step breakpoints,
   and clear step_over_bkpt.  Return true if a step-over was actually
   in progress for LWP, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  /* Software single-step planted breakpoints on the successor
	     PCs; they must exist and must go now.  */
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return true;
    }
  else
    return false;
}
4669
/* If a step-over is in progress (STEP_OVER_BKPT is set), drain all
   pending events, finish the step-over for the stepping LWP,
   optionally discard its step SIGTRAP, and unsuspend the other LWPs.
   Used on the detach path.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  if (debug_threads)
		    debug_printf ("detach: discard step-over SIGTRAP\n");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		{
		  /* The client asked for a step itself, so the SIGTRAP
		     is a real stop it should see.  */
		  if (debug_threads)
		    debug_printf ("detach: resume_step, "
				  "not discarding step-over SIGTRAP\n");
		}
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4723
/* Act on THREAD's previously-recorded resume request (lwp->resume):
   handle resume_stop requests by queuing a SIGSTOP, enqueue any
   requested signal, and either actually proceed the LWP or leave it
   stopped when LEAVE_ALL_STOPPED is set or it has pending state.
   Clears lwp->resume once handled.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread — nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}
4828
/* The target's "resume" entry point.  Records RESUME_INFO (N entries)
   on each thread, decides whether everything must instead stay
   stopped (pending status in all-stop, or a thread needing a
   step-over), resumes or leaves each thread accordingly, and kicks
   off a step-over if one is needed.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Record each thread's matching resume request, if any.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4904
/* Let THREAD's LWP run again, unless it is EXCEPT, is already
   running, is suspended, has a pending status to report, or the
   client asked for it to remain stopped.  Re-queues an expected
   SIGSTOP where needed and installs single-step breakpoints when the
   client requested stepping on a software-single-step target.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the LWP the caller is handling specially (e.g. the one
     doing a step-over); leave it alone.  */
  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  /* A stop was already reported to the client for this LWP; keep it
     stopped as requested.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report.empty ()
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* Mid step-over: keep stepping until past the breakpoint.  */
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
5001
df95181f
TBA
5002void
5003linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5004 lwp_info *except)
7984d532 5005{
d86d4aaf 5006 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5007
5008 if (lwp == except)
e2b44075 5009 return;
7984d532 5010
863d01bd 5011 lwp_suspended_decr (lwp);
7984d532 5012
e2b44075 5013 proceed_one_lwp (thread, except);
d50171e4
PA
5014}
5015
/* Resume every stopped LWP — unless some thread needs to step over a
   breakpoint, in which case only that step-over is started and
   everything else stays stopped until it completes.  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
			 {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
5053
/* Restart all stopped LWPs except EXCEPT.  If UNSUSPEND is set, also
   decrement each LWP's suspend count before proceeding it (the
   counterpart of a suspending stop_all_lwps call).  */

void
linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
	debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
		      lwpid_of (get_lwp_thread (except)));
      else
	debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}
5084
58caa3dc
DJ
5085
5086#ifdef HAVE_LINUX_REGSETS
5087
1faeff08
MR
5088#define use_linux_regsets 1
5089
030031ee
PA
5090/* Returns true if REGSET has been disabled. */
5091
5092static int
5093regset_disabled (struct regsets_info *info, struct regset_info *regset)
5094{
5095 return (info->disabled_regsets != NULL
5096 && info->disabled_regsets[regset - info->regsets]);
5097}
5098
5099/* Disable REGSET. */
5100
5101static void
5102disable_regset (struct regsets_info *info, struct regset_info *regset)
5103{
5104 int dr_offset;
5105
5106 dr_offset = regset - info->regsets;
5107 if (info->disabled_regsets == NULL)
224c3ddb 5108 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5109 info->disabled_regsets[dr_offset] = 1;
5110}
5111
/* Fetch all registers covered by REGSETS_INFO from the current LWP
   into REGCACHE, via PTRACE_GETREGSET-style requests.  Returns 0 if a
   GENERAL_REGS regset was successfully read, 1 otherwise (telling the
   caller to fall back to per-register transfers).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* Regsets with an NT_* note type take an iovec argument;
	 otherwise the buffer is passed directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC's ptrace takes its arguments in a different order.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5190
/* Write all registers covered by REGSETS_INFO from REGCACHE back to
   the current LWP.  Each regset is first read from the kernel so that
   fields not tracked in the regcache are preserved, then overlaid
   with the cached values and written back.  Returns 0 if a
   GENERAL_REGS regset was handled, 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5279
1faeff08 5280#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5281
1faeff08 5282#define use_linux_regsets 0
3aee8918
PA
5283#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5284#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5285
58caa3dc 5286#endif
1faeff08
MR
5287
5288/* Return 1 if register REGNO is supported by one of the regset ptrace
5289 calls or 0 if it has to be transferred individually. */
5290
5291static int
3aee8918 5292linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5293{
5294 unsigned char mask = 1 << (regno % 8);
5295 size_t index = regno / 8;
5296
5297 return (use_linux_regsets
3aee8918
PA
5298 && (regs_info->regset_bitmap == NULL
5299 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5300}
5301
58caa3dc 5302#ifdef HAVE_LINUX_USRREGS
1faeff08 5303
5b3da067 5304static int
3aee8918 5305register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5306{
5307 int addr;
5308
3aee8918 5309 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5310 error ("Invalid register number %d.", regnum);
5311
3aee8918 5312 addr = usrregs->regmap[regnum];
1faeff08
MR
5313
5314 return addr;
5315}
5316
/* Fetch one register (REGNO) from the inferior with PTRACE_PEEKUSER,
   one word at a time, and supply it to REGCACHE.  Registers the
   low target cannot fetch, or with no usr offset, are skipped; a
   ptrace error marks the register unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5361
/* Write one register (REGNO) from REGCACHE back to the inferior with
   PTRACE_POKEUSER, one word at a time.  Registers the low target
   cannot store, or with no usr offset, are skipped; ESRCH (process
   gone) is silently ignored, any other ptrace failure errors out.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the transfer size up to a whole number of ptrace words,
     zero-padding the tail.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5413#endif /* HAVE_LINUX_USRREGS */
1faeff08 5414
/* Default implementation of the low_collect_ptrace_register hook:
   collect register REGNO from REGCACHE into BUF with no special
   handling.  Low targets may override this.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5421
/* Default implementation of the low_supply_ptrace_register hook:
   supply register REGNO to REGCACHE from BUF with no special
   handling.  Low targets may override this.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5428
/* Fetch registers via PTRACE_PEEKUSER.  REGNO of -1 means all of
   them; with ALL clear, registers already covered by a regset are
   skipped.  A no-op on hosts without HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}
5447
/* Store registers via PTRACE_POKEUSER.  REGNO of -1 means all of
   them; with ALL clear, registers already covered by a regset are
   skipped.  A no-op on hosts without HAVE_LINUX_USRREGS.  */

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}
1faeff08 5466
/* The target's fetch_registers entry point: read register REGNO (or
   all registers if REGNO is -1) into REGCACHE, first giving the low
   target a chance via low_fetch_register, then trying regsets, then
   falling back to per-register usr transfers where needed.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Offer every register to the low target's hook first.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5497
/* The target's store_registers entry point: write register REGNO (or
   all registers if REGNO is -1) from REGCACHE to the inferior,
   preferring regsets and falling back to per-register usr transfers
   where needed.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5522
/* Default implementation of the low_fetch_register hook: no register
   is fetched specially, so report REGNO as unhandled and let the
   generic code transfer it.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5528
/* A wrapper for the read_memory target op.  Forwards to the active
   target's method so callers with no target reference can use it.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5536
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Tries the fast
   /proc/PID/mem path first, then falls back to word-at-a-time
   PTRACE_PEEKTEXT.  Returns 0 on success, otherwise the errno from
   the failing ptrace call.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  Even on a partial
     failure, hand back whatever words were read before the error.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5625
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.
     POKETEXT writes whole words, so the partial first/last words must
     be read back first to avoid clobbering bytes outside [MEMADDR,
     MEMADDR+LEN).  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9 5717
2a31c7aa
TBA
/* Hook run when new symbols become available: (re)try initializing
   libthread_db-based thread support for the current process.  No-op
   when thread_db support is not compiled in or already active.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5730
eb497a2a
TBA
5731void
5732linux_process_target::request_interrupt ()
e5379b03 5733{
78708b7c
PA
5734 /* Send a SIGINT to the process group. This acts just like the user
5735 typed a ^C on the controlling terminal. */
eb497a2a 5736 ::kill (-signal_pid, SIGINT);
e5379b03
DJ
5737}
5738
eac215cc
TBA
/* The read_auxv target op below is always available on Linux.  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5744
aa691b87
RM
5745/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5746 to debugger memory starting at MYADDR. */
5747
eac215cc
TBA
5748int
5749linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5750 unsigned int len)
aa691b87
RM
5751{
5752 char filename[PATH_MAX];
5753 int fd, n;
0bfdf32f 5754 int pid = lwpid_of (current_thread);
aa691b87 5755
6cebaf6e 5756 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5757
5758 fd = open (filename, O_RDONLY);
5759 if (fd < 0)
5760 return -1;
5761
5762 if (offset != (CORE_ADDR) 0
5763 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5764 n = -1;
5765 else
5766 n = read (fd, myaddr, len);
5767
5768 close (fd);
5769
5770 return n;
5771}
5772
7e0bde70
TBA
5773int
5774linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5775 int size, raw_breakpoint *bp)
e013ee27 5776{
c8f4bfdd
YQ
5777 if (type == raw_bkpt_type_sw)
5778 return insert_memory_breakpoint (bp);
e013ee27 5779 else
9db9aa23
TBA
5780 return low_insert_point (type, addr, size, bp);
5781}
5782
/* Default low-target hook for inserting hardware points; architectures
   with hardware breakpoint/watchpoint support override this.  */

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5790
7e0bde70
TBA
5791int
5792linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5793 int size, raw_breakpoint *bp)
e013ee27 5794{
c8f4bfdd
YQ
5795 if (type == raw_bkpt_type_sw)
5796 return remove_memory_breakpoint (bp);
e013ee27 5797 else
9db9aa23
TBA
5798 return low_remove_point (type, addr, size, bp);
5799}
5800
/* Default low-target hook for removing hardware points; architectures
   with hardware breakpoint/watchpoint support override this.  */

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}
5808
84320c4e 5809/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5810 method. */
5811
84320c4e
TBA
5812bool
5813linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5814{
5815 struct lwp_info *lwp = get_thread_lwp (current_thread);
5816
5817 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5818}
5819
/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  Accurate SW-breakpoint stop reasons require decoding the
   SIGTRAP siginfo.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5828
93fe88b2 5829/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5830 method. */
5831
93fe88b2
TBA
5832bool
5833linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5834{
5835 struct lwp_info *lwp = get_thread_lwp (current_thread);
5836
5837 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5838}
5839
/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  Like the SW variant, this relies on siginfo-based SIGTRAP
   decoding.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5848
/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  /* NOTE(review): reported unconditionally; presumably every Linux
     port reaching this code has PTRACE_SINGLESTEP — confirm for new
     ports.  */
  return true;
}
5856
6eeb5c55
TBA
5857bool
5858linux_process_target::stopped_by_watchpoint ()
e013ee27 5859{
0bfdf32f 5860 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5861
15c66dd6 5862 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5863}
5864
6eeb5c55
TBA
5865CORE_ADDR
5866linux_process_target::stopped_data_address ()
e013ee27 5867{
0bfdf32f 5868 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5869
5870 return lwp->stopped_data_address;
e013ee27
OF
5871}
5872
db0dfaa0
LM
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5887
52fb6437
NS
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Returns 1 and sets *TEXT_P/*DATA_P on success,
   0 if any of the PTRACE_PEEKUSER reads failed.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PEEKUSER reports errors via errno, not the return value.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5928
6e3fd7e9
TBA
/* TLS address lookup is only available when libthread_db support is
   compiled in.  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
5938
/* Look up the thread-local storage address for THREAD at OFFSET within
   the module loaded at LOAD_MODULE, storing the result in *ADDRESS.
   Delegates to libthread_db; returns its status, or -1 when thread_db
   support is not compiled in.  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
5951
2d0795ee
TBA
/* The qXfer:osdata:read packet is supported (see qxfer_osdata).  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}
5957
/* Handle qXfer:osdata:read.  Only reading is implemented; WRITEBUF is
   ignored.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5966
cb63de7c
TBA
5967void
5968linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5969 gdb_byte *inf_siginfo, int direction)
d0722149 5970{
cb63de7c 5971 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5972
5973 /* If there was no callback, or the callback didn't do anything,
5974 then just do a straight memcpy. */
5975 if (!done)
5976 {
5977 if (direction == 1)
a5362b9a 5978 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5979 else
a5362b9a 5980 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5981 }
5982}
5983
cb63de7c
TBA
/* Default low-target hook: no architecture-specific siginfo layout
   conversion is needed.  */

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}
5990
d7abedf7
TBA
/* The qXfer:siginfo:read/write packets are supported.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
5996
/* Handle qXfer:siginfo:read/write: transfer up to LEN bytes at OFFSET
   of the current LWP's pending-signal siginfo.  Reads into READBUF, or
   writes from WRITEBUF when READBUF is NULL.  Returns the number of
   bytes transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6047
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Everything called here must be
   async-signal-safe.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: a signal handler must not clobber the interrupted
     code's errno.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6073
0dc587d4
TBA
/* Non-stop mode is supported (see async below).  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
6079
0dc587d4
TBA
/* Enable or disable async target events according to ENABLE.  Returns
   the previous setting.  SIGCHLD is blocked around the transition so
   sigchld_handler cannot observe a half-created event pipe.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Both ends are non-blocking: the handler writes and the
	     event loop reads without ever stalling.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6135
0dc587d4
TBA
6136int
6137linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
6138{
6139 /* Register or unregister from event-loop accordingly. */
0dc587d4 6140 target_async (nonstop);
aa96c426 6141
0dc587d4 6142 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
6143 return -1;
6144
bd99dc85
PA
6145 return 0;
6146}
6147
652aef77
TBA
/* Debugging multiple processes simultaneously is supported.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}
6153
89245bc0
DB
/* Check if fork events are supported.  Requires kernel support for
   PTRACE_O_TRACEFORK.  */

bool
linux_process_target::supports_fork_events ()
{
  return linux_supports_tracefork ();
}
6161
/* Check if vfork events are supported.  Uses the same kernel
   tracefork capability probe as fork events.  */

bool
linux_process_target::supports_vfork_events ()
{
  return linux_supports_tracefork ();
}
6169
94585166
DB
/* Check if exec events are supported.  Requires kernel support for
   PTRACE_O_TRACEEXEC.  */

bool
linux_process_target::supports_exec_events ()
{
  return linux_supports_traceexec ();
}
6177
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are applied later, once the stop is reported.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
6207
55cf3021
TBA
/* Handle a target-specific "monitor" command MON.  Delegated to the
   libthread_db layer when available; returns 0 (unhandled)
   otherwise.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
6217
95a45fc1
TBA
/* Return the CPU core that thread PTID last ran on (from /proc, via
   the common helper).  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6223
c756403b
TBA
/* Disabling address space randomization for new inferiors is
   supported.  */

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}
efcbbd14 6229
c0245cb9
TBA
/* The in-process agent is supported.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6235
2526e0cd
TBA
6236bool
6237linux_process_target::supports_range_stepping ()
c2d6af84 6238{
7582c77c 6239 if (supports_software_single_step ())
2526e0cd 6240 return true;
c2d6af84 6241
9cfd8715
TBA
6242 return low_supports_range_stepping ();
6243}
6244
/* Default low-target hook: no hardware range stepping.  */

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}
6250
8247b823
TBA
/* Resolving a PID to its executable path is supported (via /proc).  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
6256
/* Return the pathname of the executable running in process PID, as
   resolved from /proc/PID/exe by the common helper.  */

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6262
c9b7b804
TBA
/* File operations within the inferior's mount namespace are
   supported (see the multifs_* methods below).  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}
6268
/* Open FILENAME within PID's mount namespace, with FLAGS and MODE as
   for open(2); the descriptor is opened close-on-exec.  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}
6275
/* Unlink FILENAME within PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}
6281
/* Read the target of symlink FILENAME within PID's mount namespace
   into BUF (at most BUFSIZ bytes), as for readlink(2).  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6288
723b724b 6289#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
6290struct target_loadseg
6291{
6292 /* Core address to which the segment is mapped. */
6293 Elf32_Addr addr;
6294 /* VMA recorded in the program header. */
6295 Elf32_Addr p_vaddr;
6296 /* Size of this segment in memory. */
6297 Elf32_Word p_memsz;
6298};
6299
723b724b 6300# if defined PT_GETDSBT
78d85199
YQ
6301struct target_loadmap
6302{
6303 /* Protocol version number, must be zero. */
6304 Elf32_Word version;
6305 /* Pointer to the DSBT table, its size, and the DSBT index. */
6306 unsigned *dsbt_table;
6307 unsigned dsbt_size, dsbt_index;
6308 /* Number of segments in this map. */
6309 Elf32_Word nsegs;
6310 /* The actual memory map. */
6311 struct target_loadseg segs[/*nsegs*/];
6312};
723b724b
MF
6313# define LINUX_LOADMAP PT_GETDSBT
6314# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6315# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6316# else
6317struct target_loadmap
6318{
6319 /* Protocol version number, must be zero. */
6320 Elf32_Half version;
6321 /* Number of segments in this map. */
6322 Elf32_Half nsegs;
6323 /* The actual memory map. */
6324 struct target_loadseg segs[/*nsegs*/];
6325};
6326# define LINUX_LOADMAP PTRACE_GETFDPIC
6327# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6328# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6329# endif
78d85199 6330
9da41fda
TBA
/* Loadmap reading is supported on targets compiled with PT_GETDSBT or
   PTRACE_GETFDPIC (this function is inside that #if region).  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6336
/* Read the DSBT/FDPIC load map of the executable (ANNEX "exec") or
   its interpreter (ANNEX "interp").  Copies up to LEN bytes starting
   at OFFSET into MYADDR; returns the number of bytes copied, or -1 on
   error or unrecognized ANNEX.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel stores a pointer to the map through DATA.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b 6369#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6370
bc8d3ae4
TBA
/* Syscall catchpoints need both an architecture-specific low-target
   implementation and kernel PTRACE_O_TRACESYSGOOD support.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return (low_supports_catch_syscall ()
	  && linux_supports_tracesysgood ());
}
6377
9eedd27d
TBA
/* Default low-target hook: syscall catchpoints unsupported.  */

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}
6383
770d8f6a
TBA
6384CORE_ADDR
6385linux_process_target::read_pc (regcache *regcache)
219f2f23 6386{
bf9ae9d8 6387 if (!low_supports_breakpoints ())
219f2f23
PA
6388 return 0;
6389
bf9ae9d8 6390 return low_get_pc (regcache);
219f2f23
PA
6391}
6392
770d8f6a
TBA
/* Write PC into REGCACHE.  Only valid on targets that support
   breakpoints, i.e. that provide low_set_pc.  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6400
68119632
TBA
/* The thread_stopped target op is supported.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}
6406
6407bool
6408linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6409{
6410 return get_thread_lwp (thread)->stopped;
6411}
6412
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded to stop_all_lwps.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}
6420
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded to unstop_all_lwps.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6429
79b44087
TBA
/* Pause all threads before a memory access when in non-stop mode.
   Always returns 0 (success).  */

int
linux_process_target::prepare_to_access_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_pause_all (true);
  return 0;
}
6439
79b44087
TBA
/* Undo prepare_to_access_memory: resume the threads paused for the
   memory access when in non-stop mode.  */

void
linux_process_target::done_accessing_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_unpause_all (true);
}
6448
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success,
   1 if /proc/PID/auxv cannot be opened, 2 if AT_PHDR or AT_PHNUM is
   missing from the auxiliary vector.  IS_ELF64 selects which auxv
   entry layout to parse.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Scan auxv entries until both values are found or the vector
     ends.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6514
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   The runtime relocation (non-zero for PIE executables) is derived from
   the PT_PHDR entry and applied to PT_DYNAMIC's p_vaddr.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6588
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the address of a pointer to
	     r_debug; dereference it in inferior memory.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic section's
	     own address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6690
6691/* Read one pointer from MEMADDR in the inferior. */
6692
6693static int
6694read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6695{
485f1ee4
PA
6696 int ret;
6697
6698 /* Go through a union so this works on either big or little endian
6699 hosts, when the inferior's pointer size is smaller than the size
6700 of CORE_ADDR. It is assumed the inferior's endianness is the
6701 same of the superior's. */
6702 union
6703 {
6704 CORE_ADDR core_addr;
6705 unsigned int ui;
6706 unsigned char uc;
6707 } addr;
6708
6709 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6710 if (ret == 0)
6711 {
6712 if (ptr_size == sizeof (CORE_ADDR))
6713 *ptr = addr.core_addr;
6714 else if (ptr_size == sizeof (unsigned int))
6715 *ptr = addr.ui;
6716 else
6717 gdb_assert_not_reached ("unhandled pointer size");
6718 }
6719 return ret;
2268b414
JK
6720}
6721
974387bb
TBA
/* The qXfer:libraries-svr4:read packet is implemented by
   qxfer_libraries_svr4 below, so advertise support for it
   unconditionally.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6727
2268b414
JK
/* Byte offsets of the fields of the dynamic linker's `struct r_debug'
   and `struct link_map' that we read from the inferior.  Two constant
   instances exist below, for the 32-bit and 64-bit SVR4 layouts.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6751
fb723180 6752/* Construct qXfer:libraries-svr4:read reply. */
2268b414 6753
974387bb
TBA
6754int
6755linux_process_target::qxfer_libraries_svr4 (const char *annex,
6756 unsigned char *readbuf,
6757 unsigned const char *writebuf,
6758 CORE_ADDR offset, int len)
2268b414 6759{
fe978cb0 6760 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6761 char filename[PATH_MAX];
6762 int pid, is_elf64;
6763
6764 static const struct link_map_offsets lmo_32bit_offsets =
6765 {
6766 0, /* r_version offset. */
6767 4, /* r_debug.r_map offset. */
6768 0, /* l_addr offset in link_map. */
6769 4, /* l_name offset in link_map. */
6770 8, /* l_ld offset in link_map. */
6771 12, /* l_next offset in link_map. */
6772 16 /* l_prev offset in link_map. */
6773 };
6774
6775 static const struct link_map_offsets lmo_64bit_offsets =
6776 {
6777 0, /* r_version offset. */
6778 8, /* r_debug.r_map offset. */
6779 0, /* l_addr offset in link_map. */
6780 8, /* l_name offset in link_map. */
6781 16, /* l_ld offset in link_map. */
6782 24, /* l_next offset in link_map. */
6783 32 /* l_prev offset in link_map. */
6784 };
6785 const struct link_map_offsets *lmo;
214d508e 6786 unsigned int machine;
b1fbec62
GB
6787 int ptr_size;
6788 CORE_ADDR lm_addr = 0, lm_prev = 0;
b1fbec62
GB
6789 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6790 int header_done = 0;
2268b414
JK
6791
6792 if (writebuf != NULL)
6793 return -2;
6794 if (readbuf == NULL)
6795 return -1;
6796
0bfdf32f 6797 pid = lwpid_of (current_thread);
2268b414 6798 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 6799 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 6800 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 6801 ptr_size = is_elf64 ? 8 : 4;
2268b414 6802
b1fbec62
GB
6803 while (annex[0] != '\0')
6804 {
6805 const char *sep;
6806 CORE_ADDR *addrp;
da4ae14a 6807 int name_len;
2268b414 6808
b1fbec62
GB
6809 sep = strchr (annex, '=');
6810 if (sep == NULL)
6811 break;
0c5bf5a9 6812
da4ae14a
TT
6813 name_len = sep - annex;
6814 if (name_len == 5 && startswith (annex, "start"))
b1fbec62 6815 addrp = &lm_addr;
da4ae14a 6816 else if (name_len == 4 && startswith (annex, "prev"))
b1fbec62
GB
6817 addrp = &lm_prev;
6818 else
6819 {
6820 annex = strchr (sep, ';');
6821 if (annex == NULL)
6822 break;
6823 annex++;
6824 continue;
6825 }
6826
6827 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 6828 }
b1fbec62
GB
6829
6830 if (lm_addr == 0)
2268b414 6831 {
b1fbec62
GB
6832 int r_version = 0;
6833
6834 if (priv->r_debug == 0)
6835 priv->r_debug = get_r_debug (pid, is_elf64);
6836
6837 /* We failed to find DT_DEBUG. Such situation will not change
6838 for this inferior - do not retry it. Report it to GDB as
6839 E01, see for the reasons at the GDB solib-svr4.c side. */
6840 if (priv->r_debug == (CORE_ADDR) -1)
6841 return -1;
6842
6843 if (priv->r_debug != 0)
2268b414 6844 {
b1fbec62
GB
6845 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6846 (unsigned char *) &r_version,
6847 sizeof (r_version)) != 0
6848 || r_version != 1)
6849 {
6850 warning ("unexpected r_debug version %d", r_version);
6851 }
6852 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6853 &lm_addr, ptr_size) != 0)
6854 {
6855 warning ("unable to read r_map from 0x%lx",
6856 (long) priv->r_debug + lmo->r_map_offset);
6857 }
2268b414 6858 }
b1fbec62 6859 }
2268b414 6860
f6e8a41e 6861 std::string document = "<library-list-svr4 version=\"1.0\"";
b1fbec62
GB
6862
6863 while (lm_addr
6864 && read_one_ptr (lm_addr + lmo->l_name_offset,
6865 &l_name, ptr_size) == 0
6866 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6867 &l_addr, ptr_size) == 0
6868 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6869 &l_ld, ptr_size) == 0
6870 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6871 &l_prev, ptr_size) == 0
6872 && read_one_ptr (lm_addr + lmo->l_next_offset,
6873 &l_next, ptr_size) == 0)
6874 {
6875 unsigned char libname[PATH_MAX];
6876
6877 if (lm_prev != l_prev)
2268b414 6878 {
b1fbec62
GB
6879 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6880 (long) lm_prev, (long) l_prev);
6881 break;
2268b414
JK
6882 }
6883
d878444c
JK
6884 /* Ignore the first entry even if it has valid name as the first entry
6885 corresponds to the main executable. The first entry should not be
6886 skipped if the dynamic loader was loaded late by a static executable
6887 (see solib-svr4.c parameter ignore_first). But in such case the main
6888 executable does not have PT_DYNAMIC present and this function already
6889 exited above due to failed get_r_debug. */
6890 if (lm_prev == 0)
f6e8a41e 6891 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
d878444c
JK
6892 else
6893 {
6894 /* Not checking for error because reading may stop before
6895 we've got PATH_MAX worth of characters. */
6896 libname[0] = '\0';
6897 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6898 libname[sizeof (libname) - 1] = '\0';
6899 if (libname[0] != '\0')
2268b414 6900 {
d878444c
JK
6901 if (!header_done)
6902 {
6903 /* Terminate `<library-list-svr4'. */
f6e8a41e 6904 document += '>';
d878444c
JK
6905 header_done = 1;
6906 }
2268b414 6907
e6a58aa8
SM
6908 string_appendf (document, "<library name=\"");
6909 xml_escape_text_append (&document, (char *) libname);
6910 string_appendf (document, "\" lm=\"0x%lx\" "
f6e8a41e 6911 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
e6a58aa8
SM
6912 (unsigned long) lm_addr, (unsigned long) l_addr,
6913 (unsigned long) l_ld);
d878444c 6914 }
0afae3cf 6915 }
b1fbec62
GB
6916
6917 lm_prev = lm_addr;
6918 lm_addr = l_next;
2268b414
JK
6919 }
6920
b1fbec62
GB
6921 if (!header_done)
6922 {
6923 /* Empty list; terminate `<library-list-svr4'. */
f6e8a41e 6924 document += "/>";
b1fbec62
GB
6925 }
6926 else
f6e8a41e 6927 document += "</library-list-svr4>";
b1fbec62 6928
f6e8a41e 6929 int document_len = document.length ();
2268b414
JK
6930 if (offset < document_len)
6931 document_len -= offset;
6932 else
6933 document_len = 0;
6934 if (len > document_len)
6935 len = document_len;
6936
f6e8a41e 6937 memcpy (readbuf, document.data () + offset, len);
2268b414
JK
6938
6939 return len;
6940}
6941
9accd112
MM
6942#ifdef HAVE_LINUX_BTRACE
6943
79597bdd
TBA
/* See to_enable_btrace target method.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  /* Delegate to the shared Linux btrace support.  */
  return linux_enable_btrace (ptid, conf);
}
6950
969c39fb 6951/* See to_disable_btrace target method. */
9accd112 6952
79597bdd
TBA
6953int
6954linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6955{
6956 enum btrace_error err;
6957
6958 err = linux_disable_btrace (tinfo);
6959 return (err == BTRACE_ERR_NONE ? 0 : -1);
6960}
6961
bc504a31 6962/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6963
6964static void
6965linux_low_encode_pt_config (struct buffer *buffer,
6966 const struct btrace_data_pt_config *config)
6967{
6968 buffer_grow_str (buffer, "<pt-config>\n");
6969
6970 switch (config->cpu.vendor)
6971 {
6972 case CV_INTEL:
6973 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6974 "model=\"%u\" stepping=\"%u\"/>\n",
6975 config->cpu.family, config->cpu.model,
6976 config->cpu.stepping);
6977 break;
6978
6979 default:
6980 break;
6981 }
6982
6983 buffer_grow_str (buffer, "</pt-config>\n");
6984}
6985
6986/* Encode a raw buffer. */
6987
6988static void
6989linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6990 unsigned int size)
6991{
6992 if (size == 0)
6993 return;
6994
268a13a5 6995 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
6996 buffer_grow_str (buffer, "<raw>\n");
6997
6998 while (size-- > 0)
6999 {
7000 char elem[2];
7001
7002 elem[0] = tohex ((*data >> 4) & 0xf);
7003 elem[1] = tohex (*data++ & 0xf);
7004
7005 buffer_grow (buffer, elem, 2);
7006 }
7007
7008 buffer_grow_str (buffer, "</raw>\n");
7009}
7010
969c39fb
MM
7011/* See to_read_btrace target method. */
7012
79597bdd
TBA
int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  /* Fetch the trace; on failure report an RSP "E." error string to the
     caller and return -1.  */
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Encode the trace as XML, depending on the trace format.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      /* Branch Trace Store: one <block> element per branch block.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      /* Intel Processor Trace: CPU configuration followed by the raw
	 (hex-encoded) trace data.  */
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
f4abbc16
MM
7070
7071/* See to_btrace_conf target method. */
7072
79597bdd
TBA
int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  /* A NULL configuration yields an empty <btrace-conf> element.  */
  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
9accd112
MM
7107#endif /* HAVE_LINUX_BTRACE */
7108
7b669087
GB
7109/* See nat/linux-nat.h. */
7110
ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
7116
7f63b89b
TBA
const char *
linux_process_target::thread_name (ptid_t thread)
{
  /* Delegate to the /proc-based helper; NOTE(review): presumably
     returns NULL when the name cannot be read -- confirm against
     linux_proc_tid_get_name's contract.  */
  return linux_proc_tid_get_name (thread);
}
7122
7123#if USE_THREAD_DB
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  /* Compiled only when USE_THREAD_DB; the libthread_db layer does the
     actual handle lookup.  */
  return thread_db_thread_handle (ptid, handle, handle_len);
}
7130#endif
7131
276d4552
YQ
7132/* Default implementation of linux_target_ops method "set_pc" for
7133 32-bit pc register which is literally named "pc". */
7134
7135void
7136linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7137{
7138 uint32_t newpc = pc;
7139
7140 supply_register_by_name (regcache, "pc", &newpc);
7141}
7142
7143/* Default implementation of linux_target_ops method "get_pc" for
7144 32-bit pc register which is literally named "pc". */
7145
7146CORE_ADDR
7147linux_get_pc_32bit (struct regcache *regcache)
7148{
7149 uint32_t pc;
7150
7151 collect_register_by_name (regcache, "pc", &pc);
7152 if (debug_threads)
7153 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7154 return pc;
7155}
7156
6f69e520
YQ
7157/* Default implementation of linux_target_ops method "set_pc" for
7158 64-bit pc register which is literally named "pc". */
7159
7160void
7161linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7162{
7163 uint64_t newpc = pc;
7164
7165 supply_register_by_name (regcache, "pc", &newpc);
7166}
7167
7168/* Default implementation of linux_target_ops method "get_pc" for
7169 64-bit pc register which is literally named "pc". */
7170
7171CORE_ADDR
7172linux_get_pc_64bit (struct regcache *regcache)
7173{
7174 uint64_t pc;
7175
7176 collect_register_by_name (regcache, "pc", &pc);
7177 if (debug_threads)
7178 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7179 return pc;
7180}
7181
0570503d 7182/* See linux-low.h. */
974c89e0 7183
0570503d
PFC
7184int
7185linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7186{
7187 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7188 int offset = 0;
7189
7190 gdb_assert (wordsize == 4 || wordsize == 8);
7191
52405d85 7192 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7193 {
7194 if (wordsize == 4)
7195 {
0570503d 7196 uint32_t *data_p = (uint32_t *) data;
974c89e0 7197 if (data_p[0] == match)
0570503d
PFC
7198 {
7199 *valp = data_p[1];
7200 return 1;
7201 }
974c89e0
AH
7202 }
7203 else
7204 {
0570503d 7205 uint64_t *data_p = (uint64_t *) data;
974c89e0 7206 if (data_p[0] == match)
0570503d
PFC
7207 {
7208 *valp = data_p[1];
7209 return 1;
7210 }
974c89e0
AH
7211 }
7212
7213 offset += 2 * wordsize;
7214 }
7215
7216 return 0;
7217}
7218
7219/* See linux-low.h. */
7220
7221CORE_ADDR
7222linux_get_hwcap (int wordsize)
7223{
0570503d
PFC
7224 CORE_ADDR hwcap = 0;
7225 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7226 return hwcap;
974c89e0
AH
7227}
7228
7229/* See linux-low.h. */
7230
7231CORE_ADDR
7232linux_get_hwcap2 (int wordsize)
7233{
0570503d
PFC
7234 CORE_ADDR hwcap2 = 0;
7235 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7236 return hwcap2;
974c89e0 7237}
6f69e520 7238
3aee8918
PA
7239#ifdef HAVE_LINUX_REGSETS
7240void
7241initialize_regsets_info (struct regsets_info *info)
7242{
7243 for (info->num_regsets = 0;
7244 info->regsets[info->num_regsets].size >= 0;
7245 info->num_regsets++)
7246 ;
3aee8918
PA
7247}
7248#endif
7249
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector, set up ptrace/proc warning machinery, install the SIGCHLD
   handler, and run architecture-specific setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  /* Make the Linux target the active target vector.  */
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* Deliver SIGCHLD to sigchld_handler; SA_RESTART makes interrupted
     system calls restart rather than fail with EINTR.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}
This page took 2.162157 seconds and 4 git commands to generate.