/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
69f4c9cc
AH
74#ifndef AT_HWCAP2
75#define AT_HWCAP2 26
76#endif
77
db0dfaa0
LM
78/* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81#if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84#if defined(__mcoldfire__)
85/* These are still undefined in 3.10 kernels. */
86#define PT_TEXT_ADDR 49*4
87#define PT_DATA_ADDR 50*4
88#define PT_TEXT_END_ADDR 51*4
89/* BFIN already defines these since at least 2.6.32 kernels. */
90#elif defined(BFIN)
91#define PT_TEXT_ADDR 220
92#define PT_TEXT_END_ADDR 224
93#define PT_DATA_ADDR 228
94/* These are still undefined in 3.10 kernels. */
95#elif defined(__TMS320C6X__)
96#define PT_TEXT_ADDR (0x10000*4)
97#define PT_DATA_ADDR (0x10004*4)
98#define PT_TEXT_END_ADDR (0x10008*4)
99#endif
100#endif
101
5203ae1e
TBA
102#if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107#define SUPPORTS_READ_OFFSETS
108#endif
109
9accd112 110#ifdef HAVE_LINUX_BTRACE
125f8a3d 111# include "nat/linux-btrace.h"
268a13a5 112# include "gdbsupport/btrace-common.h"
9accd112
MM
113#endif
114
8365dcf5
TJB
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do
	 that, though, since it does not work when using 32-bit
	 definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do
	 that, though, since it does not work when using 32-bit
	 definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
144
/* Does the current host support PTRACE_GETREGSET?  Starts at -1,
   meaning "not determined yet".  */
int have_ptrace_getregset = -1;
cff068da
GB
148/* LWP accessors. */
149
150/* See nat/linux-nat.h. */
151
152ptid_t
153ptid_of_lwp (struct lwp_info *lwp)
154{
155 return ptid_of (get_lwp_thread (lwp));
156}
157
158/* See nat/linux-nat.h. */
159
4b134ca1
GB
160void
161lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163{
164 lwp->arch_private = info;
165}
166
167/* See nat/linux-nat.h. */
168
169struct arch_lwp_info *
170lwp_arch_private_info (struct lwp_info *lwp)
171{
172 return lwp->arch_private;
173}
174
175/* See nat/linux-nat.h. */
176
cff068da
GB
177int
178lwp_is_stopped (struct lwp_info *lwp)
179{
180 return lwp->stopped;
181}
182
183/* See nat/linux-nat.h. */
184
185enum target_stop_reason
186lwp_stop_reason (struct lwp_info *lwp)
187{
188 return lwp->stop_reason;
189}
190
0e00e962
AA
191/* See nat/linux-nat.h. */
192
193int
194lwp_is_stepping (struct lwp_info *lwp)
195{
196 return lwp->stepping;
197}
198
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;
215
216/* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219static void
220add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221{
8d749320 222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228}
229
230static int
231pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232{
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246}
24a09b5f 247
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
262
263/* FIXME make into a target method? */
24a09b5f 264int using_threads = 1;
24a09b5f 265
fa593d66
PA
266/* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268static int stabilizing_threads;
269
f50bf8e5 270static void unsuspend_all_lwps (struct lwp_info *except);
95954743 271static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 272static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 273static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 274static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 275static int linux_low_ptrace_options (int attached);
ced2dffb 276static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 277
582511be
PA
278/* When the event-loop is doing a step-over, this points at the thread
279 being stepped. */
280ptid_t step_over_bkpt;
281
bf9ae9d8
TBA
282bool
283linux_process_target::low_supports_breakpoints ()
284{
285 return false;
286}
d50171e4 287
bf9ae9d8
TBA
288CORE_ADDR
289linux_process_target::low_get_pc (regcache *regcache)
290{
291 return 0;
292}
293
294void
295linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 296{
bf9ae9d8 297 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 298}
0d62e5e8 299
7582c77c
TBA
300std::vector<CORE_ADDR>
301linux_process_target::low_get_next_pcs (regcache *regcache)
302{
303 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
304 "implemented");
305}
306
d4807ea2
TBA
307int
308linux_process_target::low_decr_pc_after_break ()
309{
310 return 0;
311}
312
c2d6af84
PA
313/* True if LWP is stopped in its stepping range. */
314
315static int
316lwp_in_step_range (struct lwp_info *lwp)
317{
318 CORE_ADDR pc = lwp->stop_pc;
319
320 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
321}
322
0d62e5e8
DJ
323struct pending_signals
324{
325 int signal;
32ca6d61 326 siginfo_t info;
0d62e5e8
DJ
327 struct pending_signals *prev;
328};
611cb4a5 329
/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Both ends are -1 while async mode is off.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
bd99dc85 338
/* Return non-zero if HEADER is a 64-bit ELF file, zero if it is a
   32-bit ELF file, and -1 if it is not an ELF header at all.  On
   success *MACHINE receives e_machine; otherwise it is EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* A valid ELF header starts with the four magic bytes.  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
356
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;

  int fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read means the file cannot be a 64-bit ELF binary.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
380
be07f1a2
PA
381/* Accepts an integer PID; Returns true if the executable PID is
382 running is a 64-bit ELF file.. */
383
384int
214d508e 385linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 386{
d8d2a3ee 387 char file[PATH_MAX];
be07f1a2
PA
388
389 sprintf (file, "/proc/%d/exe", pid);
214d508e 390 return elf_64_file_p (file, machine);
be07f1a2
PA
391}
392
fd000fb3
TBA
393void
394linux_process_target::delete_lwp (lwp_info *lwp)
bd99dc85 395{
fa96cb38
PA
396 struct thread_info *thr = get_lwp_thread (lwp);
397
398 if (debug_threads)
399 debug_printf ("deleting %ld\n", lwpid_of (thr));
400
401 remove_thread (thr);
466eecee 402
fd000fb3 403 low_delete_thread (lwp->arch_private);
466eecee 404
bd99dc85
PA
405 free (lwp);
406}
407
fd000fb3
TBA
408void
409linux_process_target::low_delete_thread (arch_lwp_info *info)
410{
411 /* Default implementation should be overridden if architecture-specific
412 info is being used. */
413 gdb_assert (info == nullptr);
414}
95954743 415
fd000fb3
TBA
416process_info *
417linux_process_target::add_linux_process (int pid, int attached)
95954743
PA
418{
419 struct process_info *proc;
420
95954743 421 proc = add_process (pid, attached);
8d749320 422 proc->priv = XCNEW (struct process_info_private);
95954743 423
fd000fb3 424 proc->priv->arch_private = low_new_process ();
aa5ca48f 425
95954743
PA
426 return proc;
427}
428
fd000fb3
TBA
429arch_process_info *
430linux_process_target::low_new_process ()
431{
432 return nullptr;
433}
434
435void
436linux_process_target::low_delete_process (arch_process_info *info)
437{
438 /* Default implementation must be overridden if architecture-specific
439 info exists. */
440 gdb_assert (info == nullptr);
441}
442
443void
444linux_process_target::low_new_fork (process_info *parent, process_info *child)
445{
446 /* Nop. */
447}
448
797bcff5
TBA
449void
450linux_process_target::arch_setup_thread (thread_info *thread)
94585166
DB
451{
452 struct thread_info *saved_thread;
453
454 saved_thread = current_thread;
455 current_thread = thread;
456
797bcff5 457 low_arch_setup ();
94585166
DB
458
459 current_thread = saved_thread;
460}
461
d16f3f6c
TBA
462int
463linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
464 int wstat)
24a09b5f 465{
c12a5089 466 client_state &cs = get_client_state ();
94585166 467 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 468 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 469 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 470 struct lwp_info *new_lwp;
24a09b5f 471
65706a29
PA
472 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
473
82075af2
JS
474 /* All extended events we currently use are mid-syscall. Only
475 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
476 you have to be using PTRACE_SEIZE to get that. */
477 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
478
c269dbdb
DB
479 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
480 || (event == PTRACE_EVENT_CLONE))
24a09b5f 481 {
95954743 482 ptid_t ptid;
24a09b5f 483 unsigned long new_pid;
05044653 484 int ret, status;
24a09b5f 485
de0d863e 486 /* Get the pid of the new lwp. */
d86d4aaf 487 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 488 &new_pid);
24a09b5f
DJ
489
490 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 491 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
492 {
493 /* The new child has a pending SIGSTOP. We can't affect it until it
494 hits the SIGSTOP, but we're already attached. */
495
97438e3f 496 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
497
498 if (ret == -1)
499 perror_with_name ("waiting for new child");
500 else if (ret != new_pid)
501 warning ("wait returned unexpected PID %d", ret);
da5898ce 502 else if (!WIFSTOPPED (status))
24a09b5f
DJ
503 warning ("wait returned unexpected status 0x%x", status);
504 }
505
c269dbdb 506 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
507 {
508 struct process_info *parent_proc;
509 struct process_info *child_proc;
510 struct lwp_info *child_lwp;
bfacd19d 511 struct thread_info *child_thr;
de0d863e
DB
512 struct target_desc *tdesc;
513
fd79271b 514 ptid = ptid_t (new_pid, new_pid, 0);
de0d863e
DB
515
516 if (debug_threads)
517 {
518 debug_printf ("HEW: Got fork event from LWP %ld, "
519 "new child is %d\n",
e38504b3 520 ptid_of (event_thr).lwp (),
e99b03dc 521 ptid.pid ());
de0d863e
DB
522 }
523
524 /* Add the new process to the tables and clone the breakpoint
525 lists of the parent. We need to do this even if the new process
526 will be detached, since we will need the process object and the
527 breakpoints to remove any breakpoints from memory when we
528 detach, and the client side will access registers. */
fd000fb3 529 child_proc = add_linux_process (new_pid, 0);
de0d863e
DB
530 gdb_assert (child_proc != NULL);
531 child_lwp = add_lwp (ptid);
532 gdb_assert (child_lwp != NULL);
533 child_lwp->stopped = 1;
bfacd19d
DB
534 child_lwp->must_set_ptrace_flags = 1;
535 child_lwp->status_pending_p = 0;
536 child_thr = get_lwp_thread (child_lwp);
537 child_thr->last_resume_kind = resume_stop;
998d452a
PA
538 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
539
863d01bd 540 /* If we're suspending all threads, leave this one suspended
0f8288ae
YQ
541 too. If the fork/clone parent is stepping over a breakpoint,
542 all other threads have been suspended already. Leave the
543 child suspended too. */
544 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
545 || event_lwp->bp_reinsert != 0)
863d01bd
PA
546 {
547 if (debug_threads)
548 debug_printf ("HEW: leaving child suspended\n");
549 child_lwp->suspended = 1;
550 }
551
de0d863e
DB
552 parent_proc = get_thread_process (event_thr);
553 child_proc->attached = parent_proc->attached;
2e7b624b
YQ
554
555 if (event_lwp->bp_reinsert != 0
7582c77c 556 && supports_software_single_step ()
2e7b624b
YQ
557 && event == PTRACE_EVENT_VFORK)
558 {
3b9a79ef
YQ
559 /* If we leave single-step breakpoints there, child will
560 hit it, so uninsert single-step breakpoints from parent
2e7b624b
YQ
561 (and child). Once vfork child is done, reinsert
562 them back to parent. */
3b9a79ef 563 uninsert_single_step_breakpoints (event_thr);
2e7b624b
YQ
564 }
565
63c40ec7 566 clone_all_breakpoints (child_thr, event_thr);
de0d863e 567
cc397f3a 568 tdesc = allocate_target_description ();
de0d863e
DB
569 copy_target_description (tdesc, parent_proc->tdesc);
570 child_proc->tdesc = tdesc;
de0d863e 571
3a8a0396 572 /* Clone arch-specific process data. */
fd000fb3 573 low_new_fork (parent_proc, child_proc);
3a8a0396 574
de0d863e 575 /* Save fork info in the parent thread. */
c269dbdb
DB
576 if (event == PTRACE_EVENT_FORK)
577 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
578 else if (event == PTRACE_EVENT_VFORK)
579 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
580
de0d863e 581 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 582
de0d863e
DB
583 /* The status_pending field contains bits denoting the
584 extended event, so when the pending event is handled,
585 the handler will look at lwp->waitstatus. */
586 event_lwp->status_pending_p = 1;
587 event_lwp->status_pending = wstat;
588
5a04c4cf
PA
589 /* Link the threads until the parent event is passed on to
590 higher layers. */
591 event_lwp->fork_relative = child_lwp;
592 child_lwp->fork_relative = event_lwp;
593
3b9a79ef
YQ
594 /* If the parent thread is doing step-over with single-step
595 breakpoints, the list of single-step breakpoints are cloned
2e7b624b
YQ
596 from the parent's. Remove them from the child process.
597 In case of vfork, we'll reinsert them back once vforked
598 child is done. */
8a81c5d7 599 if (event_lwp->bp_reinsert != 0
7582c77c 600 && supports_software_single_step ())
8a81c5d7 601 {
8a81c5d7
YQ
602 /* The child process is forked and stopped, so it is safe
603 to access its memory without stopping all other threads
604 from other processes. */
3b9a79ef 605 delete_single_step_breakpoints (child_thr);
8a81c5d7 606
3b9a79ef
YQ
607 gdb_assert (has_single_step_breakpoints (event_thr));
608 gdb_assert (!has_single_step_breakpoints (child_thr));
8a81c5d7
YQ
609 }
610
de0d863e
DB
611 /* Report the event. */
612 return 0;
613 }
614
fa96cb38
PA
615 if (debug_threads)
616 debug_printf ("HEW: Got clone event "
617 "from LWP %ld, new child is LWP %ld\n",
618 lwpid_of (event_thr), new_pid);
619
fd79271b 620 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
b3312d80 621 new_lwp = add_lwp (ptid);
24a09b5f 622
e27d73f6 623 /* Either we're going to immediately resume the new thread
df95181f 624 or leave it stopped. resume_one_lwp is a nop if it
e27d73f6 625 thinks the thread is currently running, so set this first
df95181f 626 before calling resume_one_lwp. */
e27d73f6
DE
627 new_lwp->stopped = 1;
628
0f8288ae
YQ
629 /* If we're suspending all threads, leave this one suspended
630 too. If the fork/clone parent is stepping over a breakpoint,
631 all other threads have been suspended already. Leave the
632 child suspended too. */
633 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
634 || event_lwp->bp_reinsert != 0)
bde24c0a
PA
635 new_lwp->suspended = 1;
636
da5898ce
DJ
637 /* Normally we will get the pending SIGSTOP. But in some cases
638 we might get another signal delivered to the group first.
f21cc1a2 639 If we do get another signal, be sure not to lose it. */
20ba1ce6 640 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 641 {
54a0b537 642 new_lwp->stop_expected = 1;
20ba1ce6
PA
643 new_lwp->status_pending_p = 1;
644 new_lwp->status_pending = status;
da5898ce 645 }
c12a5089 646 else if (cs.report_thread_events)
65706a29
PA
647 {
648 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
649 new_lwp->status_pending_p = 1;
650 new_lwp->status_pending = status;
651 }
de0d863e 652
a0aad537 653#ifdef USE_THREAD_DB
94c207e0 654 thread_db_notice_clone (event_thr, ptid);
a0aad537 655#endif
86299109 656
de0d863e
DB
657 /* Don't report the event. */
658 return 1;
24a09b5f 659 }
c269dbdb
DB
660 else if (event == PTRACE_EVENT_VFORK_DONE)
661 {
662 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
663
7582c77c 664 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
2e7b624b 665 {
3b9a79ef 666 reinsert_single_step_breakpoints (event_thr);
2e7b624b 667
3b9a79ef 668 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
669 }
670
c269dbdb
DB
671 /* Report the event. */
672 return 0;
673 }
c12a5089 674 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
675 {
676 struct process_info *proc;
f27866ba 677 std::vector<int> syscalls_to_catch;
94585166
DB
678 ptid_t event_ptid;
679 pid_t event_pid;
680
681 if (debug_threads)
682 {
683 debug_printf ("HEW: Got exec event from LWP %ld\n",
684 lwpid_of (event_thr));
685 }
686
687 /* Get the event ptid. */
688 event_ptid = ptid_of (event_thr);
e99b03dc 689 event_pid = event_ptid.pid ();
94585166 690
82075af2 691 /* Save the syscall list from the execing process. */
94585166 692 proc = get_thread_process (event_thr);
f27866ba 693 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
694
695 /* Delete the execing process and all its threads. */
d16f3f6c 696 mourn (proc);
94585166
DB
697 current_thread = NULL;
698
699 /* Create a new process/lwp/thread. */
fd000fb3 700 proc = add_linux_process (event_pid, 0);
94585166
DB
701 event_lwp = add_lwp (event_ptid);
702 event_thr = get_lwp_thread (event_lwp);
703 gdb_assert (current_thread == event_thr);
797bcff5 704 arch_setup_thread (event_thr);
94585166
DB
705
706 /* Set the event status. */
707 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
708 event_lwp->waitstatus.value.execd_pathname
709 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
710
711 /* Mark the exec status as pending. */
712 event_lwp->stopped = 1;
713 event_lwp->status_pending_p = 1;
714 event_lwp->status_pending = wstat;
715 event_thr->last_resume_kind = resume_continue;
716 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
717
82075af2
JS
718 /* Update syscall state in the new lwp, effectively mid-syscall too. */
719 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
720
721 /* Restore the list to catch. Don't rely on the client, which is free
722 to avoid sending a new list when the architecture doesn't change.
723 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 724 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 725
94585166
DB
726 /* Report the event. */
727 *orig_event_lwp = event_lwp;
728 return 0;
729 }
de0d863e
DB
730
731 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
732}
733
df95181f
TBA
734CORE_ADDR
735linux_process_target::get_pc (lwp_info *lwp)
d50171e4 736{
0bfdf32f 737 struct thread_info *saved_thread;
d50171e4
PA
738 struct regcache *regcache;
739 CORE_ADDR pc;
740
bf9ae9d8 741 if (!low_supports_breakpoints ())
d50171e4
PA
742 return 0;
743
0bfdf32f
GB
744 saved_thread = current_thread;
745 current_thread = get_lwp_thread (lwp);
d50171e4 746
0bfdf32f 747 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 748 pc = low_get_pc (regcache);
d50171e4
PA
749
750 if (debug_threads)
87ce2a04 751 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 752
0bfdf32f 753 current_thread = saved_thread;
d50171e4
PA
754 return pc;
755}
756
9eedd27d
TBA
757void
758linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
82075af2
JS
759{
760 struct thread_info *saved_thread;
761 struct regcache *regcache;
762
82075af2
JS
763 saved_thread = current_thread;
764 current_thread = get_lwp_thread (lwp);
765
766 regcache = get_thread_regcache (current_thread, 1);
9eedd27d 767 low_get_syscall_trapinfo (regcache, sysno);
82075af2
JS
768
769 if (debug_threads)
4cc32bec 770 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
771
772 current_thread = saved_thread;
773}
774
9eedd27d
TBA
775void
776linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
777{
778 /* By default, report an unknown system call number. */
779 *sysno = UNKNOWN_SYSCALL;
780}
781
df95181f
TBA
782bool
783linux_process_target::save_stop_reason (lwp_info *lwp)
0d62e5e8 784{
582511be
PA
785 CORE_ADDR pc;
786 CORE_ADDR sw_breakpoint_pc;
787 struct thread_info *saved_thread;
3e572f71
PA
788#if USE_SIGTRAP_SIGINFO
789 siginfo_t siginfo;
790#endif
d50171e4 791
bf9ae9d8 792 if (!low_supports_breakpoints ())
df95181f 793 return false;
0d62e5e8 794
582511be 795 pc = get_pc (lwp);
d4807ea2 796 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
d50171e4 797
582511be
PA
798 /* breakpoint_at reads from the current thread. */
799 saved_thread = current_thread;
800 current_thread = get_lwp_thread (lwp);
47c0c975 801
3e572f71
PA
802#if USE_SIGTRAP_SIGINFO
803 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
804 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
805 {
806 if (siginfo.si_signo == SIGTRAP)
807 {
e7ad2f14
PA
808 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
809 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 810 {
e7ad2f14
PA
811 /* The si_code is ambiguous on this arch -- check debug
812 registers. */
813 if (!check_stopped_by_watchpoint (lwp))
814 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
815 }
816 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
817 {
818 /* If we determine the LWP stopped for a SW breakpoint,
819 trust it. Particularly don't check watchpoint
820 registers, because at least on s390, we'd find
821 stopped-by-watchpoint as long as there's a watchpoint
822 set. */
3e572f71 823 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 824 }
e7ad2f14 825 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 826 {
e7ad2f14
PA
827 /* This can indicate either a hardware breakpoint or
828 hardware watchpoint. Check debug registers. */
829 if (!check_stopped_by_watchpoint (lwp))
830 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 831 }
2bf6fb9d
PA
832 else if (siginfo.si_code == TRAP_TRACE)
833 {
e7ad2f14
PA
834 /* We may have single stepped an instruction that
835 triggered a watchpoint. In that case, on some
836 architectures (such as x86), instead of TRAP_HWBKPT,
837 si_code indicates TRAP_TRACE, and we need to check
838 the debug registers separately. */
839 if (!check_stopped_by_watchpoint (lwp))
840 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 841 }
3e572f71
PA
842 }
843 }
844#else
582511be
PA
845 /* We may have just stepped a breakpoint instruction. E.g., in
846 non-stop mode, GDB first tells the thread A to step a range, and
847 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
848 case we need to report the breakpoint PC. */
849 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
d7146cda 850 && low_breakpoint_at (sw_breakpoint_pc))
e7ad2f14
PA
851 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
852
853 if (hardware_breakpoint_inserted_here (pc))
854 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
855
856 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
857 check_stopped_by_watchpoint (lwp);
858#endif
859
860 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be
PA
861 {
862 if (debug_threads)
863 {
864 struct thread_info *thr = get_lwp_thread (lwp);
865
866 debug_printf ("CSBB: %s stopped by software breakpoint\n",
867 target_pid_to_str (ptid_of (thr)));
868 }
869
870 /* Back up the PC if necessary. */
871 if (pc != sw_breakpoint_pc)
e7ad2f14 872 {
582511be
PA
873 struct regcache *regcache
874 = get_thread_regcache (current_thread, 1);
bf9ae9d8 875 low_set_pc (regcache, sw_breakpoint_pc);
582511be
PA
876 }
877
e7ad2f14
PA
878 /* Update this so we record the correct stop PC below. */
879 pc = sw_breakpoint_pc;
582511be 880 }
e7ad2f14 881 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
582511be
PA
882 {
883 if (debug_threads)
884 {
885 struct thread_info *thr = get_lwp_thread (lwp);
886
887 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
888 target_pid_to_str (ptid_of (thr)));
889 }
e7ad2f14
PA
890 }
891 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
892 {
893 if (debug_threads)
894 {
895 struct thread_info *thr = get_lwp_thread (lwp);
47c0c975 896
e7ad2f14
PA
897 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
898 target_pid_to_str (ptid_of (thr)));
899 }
582511be 900 }
e7ad2f14
PA
901 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
902 {
903 if (debug_threads)
904 {
905 struct thread_info *thr = get_lwp_thread (lwp);
582511be 906
e7ad2f14
PA
907 debug_printf ("CSBB: %s stopped by trace\n",
908 target_pid_to_str (ptid_of (thr)));
909 }
910 }
911
912 lwp->stop_pc = pc;
582511be 913 current_thread = saved_thread;
df95181f 914 return true;
0d62e5e8 915}
ce3a066d 916
fd000fb3
TBA
917lwp_info *
918linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 919{
54a0b537 920 struct lwp_info *lwp;
0d62e5e8 921
8d749320 922 lwp = XCNEW (struct lwp_info);
00db26fa
PA
923
924 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 925
754e3168
AH
926 lwp->thread = add_thread (ptid, lwp);
927
fd000fb3 928 low_new_thread (lwp);
aa5ca48f 929
54a0b537 930 return lwp;
0d62e5e8 931}
611cb4a5 932
fd000fb3
TBA
933void
934linux_process_target::low_new_thread (lwp_info *info)
935{
936 /* Nop. */
937}
938
2090129c
SDJ
939/* Callback to be used when calling fork_inferior, responsible for
940 actually initiating the tracing of the inferior. */
941
942static void
943linux_ptrace_fun ()
944{
945 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
946 (PTRACE_TYPE_ARG4) 0) < 0)
50fa3001 947 trace_start_error_with_name ("ptrace");
2090129c
SDJ
948
949 if (setpgid (0, 0) < 0)
950 trace_start_error_with_name ("setpgid");
951
952 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
953 stdout to stderr so that inferior i/o doesn't corrupt the connection.
954 Also, redirect stdin to /dev/null. */
955 if (remote_connection_is_stdio ())
956 {
957 if (close (0) < 0)
958 trace_start_error_with_name ("close");
959 if (open ("/dev/null", O_RDONLY) < 0)
960 trace_start_error_with_name ("open");
961 if (dup2 (2, 1) < 0)
962 trace_start_error_with_name ("dup2");
963 if (write (2, "stdin/stdout redirected\n",
964 sizeof ("stdin/stdout redirected\n") - 1) < 0)
965 {
966 /* Errors ignored. */;
967 }
968 }
969}
970
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Optionally disable address space randomization for the child;
       the original personality is restored when this scope exits.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  /* The initial thread uses the process id for both the pid and lwp
     components of its ptid.  */
  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* Ptrace options can only be set once the child has stopped;
     remember to do it later.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
1005
/* Implement the post_create_inferior target_ops method.  Called once
   the new inferior has reported its initial stop; determines the
   architecture and enables ptrace event reporting.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Now that the inferior exists and has stopped, read its
     architecture / target description.  */
  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1024
/* Attach to the LWP identified by PTID with PTRACE_ATTACH, and add it
   to our bookkeeping.  Returns 0 on success, or the errno from
   PTRACE_ATTACH on failure.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1106
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  /* Any other failure is reported to the user, but does not
	     abort the walk over the remaining threads.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1151
500c1d85
PA
1152static void async_file_mark (void);
1153
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; calls error() on failure.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting the failure.  */
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      /* Wait for the initial stop of any thread in the group.  */
      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* Anything other than the expected SIGSTOP must be re-reported
	 to GDB later, so leave it pending.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1226
95954743 1227static int
e4eb0dec 1228last_thread_of_process_p (int pid)
95954743 1229{
e4eb0dec 1230 bool seen_one = false;
95954743 1231
da4ae14a 1232 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1233 {
e4eb0dec
SM
1234 if (!seen_one)
1235 {
1236 /* This is the first thread of this process we see. */
1237 seen_one = true;
1238 return false;
1239 }
1240 else
1241 {
1242 /* This is the second thread of this process we see. */
1243 return true;
1244 }
1245 });
da6d8c04 1246
e4eb0dec 1247 return thread == NULL;
95954743
PA
1248}
1249
/* Kill LWP, by sending it SIGKILL and then also trying PTRACE_KILL.
   Does not wait for the LWP to die; see kill_wait_lwp for that.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1293
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* For a clone (non-leader) LWP, the plain wait fails with
	 ECHILD; retry with __WCLONE.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1336
578290ec 1337/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1338 except the leader. */
95954743 1339
578290ec
SM
1340static void
1341kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1342{
54a0b537 1343 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1344
fd500816
DJ
1345 /* We avoid killing the first thread here, because of a Linux kernel (at
1346 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1347 the children get a chance to be reaped, it will remain a zombie
1348 forever. */
95954743 1349
d86d4aaf 1350 if (lwpid_of (thread) == pid)
95954743
PA
1351 {
1352 if (debug_threads)
87ce2a04 1353 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1354 target_pid_to_str (thread->id));
578290ec 1355 return;
95954743 1356 }
fd500816 1357
e76126e8 1358 kill_wait_lwp (lwp);
da6d8c04
DJ
1359}
1360
/* Kill process PROCESS and all of its LWPs, leader last, then mourn
   it.  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1395
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when no signal should be delivered on detach.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal table if we have it.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1476
/* Detach from LWP, delivering any signal it stopped for, and remove
   it from our bookkeeping.  A zombie LWP (its thread group having
   exited meanwhile) is reaped instead of detached.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1558
/* Detach from process PROCESS and every one of its LWPs, clones
   first, group leader last.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now the leader can be detached from.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1607
/* Remove all LWPs that belong to process PROC from the lwp list, free
   the process' private data, and drop it from the process list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1632
95a49a39
TBA
1633void
1634linux_process_target::join (int pid)
444d6139 1635{
444d6139
PA
1636 int status, ret;
1637
1638 do {
d105de22 1639 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1640 if (WIFEXITED (status) || WIFSIGNALED (status))
1641 break;
1642 } while (ret != -1 || errno != ECHILD);
1643}
1644
13d3d99b
TBA
1645/* Return true if the given thread is still alive. */
1646
1647bool
1648linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1649{
95954743
PA
1650 struct lwp_info *lwp = find_lwp_pid (ptid);
1651
1652 /* We assume we always know if a thread exits. If a whole process
1653 exited but we still haven't been able to report it to GDB, we'll
1654 hold on to the last lwp of the dead process. */
1655 if (lwp != NULL)
00db26fa 1656 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1657 else
1658 return 0;
1659}
1660
/* Return true if THREAD's pending stop event is still worth
   reporting; false (also clearing the pending flag) when the event
   was a breakpoint hit that is no longer valid, e.g. the breakpoint
   was removed or the PC moved.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* get_pc and the breakpoint checks below operate on
	 current_thread; temporarily switch to THREAD.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based classification, double-check the
	 breakpoint is still inserted at the stop PC.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1724
a681f9c9
PA
1725/* Returns true if LWP is resumed from the client's perspective. */
1726
1727static int
1728lwp_resumed (struct lwp_info *lwp)
1729{
1730 struct thread_info *thread = get_lwp_thread (lwp);
1731
1732 if (thread->last_resume_kind != resume_stop)
1733 return 1;
1734
1735 /* Did gdb send us a `vCont;t', but we haven't reported the
1736 corresponding stop to gdb yet? If so, the thread is still
1737 resumed/running from gdb's perspective. */
1738 if (thread->last_resume_kind == resume_stop
1739 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1740 return 1;
1741
1742 return 0;
1743}
1744
/* Predicate used when looking for a thread with a reportable event:
   returns true if THREAD matches PTID, is resumed, and has a pending
   status that is still valid.  An event that turned out stale is
   discarded and the LWP re-resumed.  */

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      /* The pending event became stale; resume the LWP the way it
	 was last resumed (step or continue) and report nothing.  */
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1768
95954743
PA
1769struct lwp_info *
1770find_lwp_pid (ptid_t ptid)
1771{
da4ae14a 1772 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1773 {
1774 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1775 return thr_arg->id.lwp () == lwp;
454296a2 1776 });
d86d4aaf
DE
1777
1778 if (thread == NULL)
1779 return NULL;
1780
9c80ecd6 1781 return get_thread_lwp (thread);
95954743
PA
1782}
1783
fa96cb38 1784/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1785
fa96cb38
PA
1786static int
1787num_lwps (int pid)
1788{
fa96cb38 1789 int count = 0;
0d62e5e8 1790
4d3bb80e
SM
1791 for_each_thread (pid, [&] (thread_info *thread)
1792 {
9c80ecd6 1793 count++;
4d3bb80e 1794 });
3aee8918 1795
fa96cb38
PA
1796 return count;
1797}
d61ddec4 1798
6d4ee8c6
GB
1799/* See nat/linux-nat.h. */
1800
1801struct lwp_info *
1802iterate_over_lwps (ptid_t filter,
d3a70e03 1803 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1804{
da4ae14a 1805 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1806 {
da4ae14a 1807 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1808
d3a70e03 1809 return callback (lwp);
6d1e5673 1810 });
6d4ee8c6 1811
9c80ecd6 1812 if (thread == NULL)
6d4ee8c6
GB
1813 return NULL;
1814
9c80ecd6 1815 return get_thread_lwp (thread);
6d4ee8c6
GB
1816}
1817
/* Detect thread-group leaders that have become zombie (because they
   exited, or because another thread exec'd) and delete them from the
   LWP list, so the group's exit/exec can be reported.  */

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
  });
}
c3adc08c 1876
a1385b7b
SM
1877/* Callback for `find_thread'. Returns the first LWP that is not
1878 stopped. */
d50171e4 1879
a1385b7b
SM
1880static bool
1881not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1882{
a1385b7b
SM
1883 if (!thread->id.matches (filter))
1884 return false;
47c0c975 1885
a1385b7b 1886 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1887
a1385b7b 1888 return !lwp->stopped;
0d62e5e8 1889}
611cb4a5 1890
/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  /* A count this high usually indicates unbalanced
     suspend/unsuspend calls; flag it in debug mode.  */
  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
		    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  A negative count means more
   unsuspends than suspends were issued -- an internal logic error.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}
1923
219f2f23
PA
1924/* This function should only be called if the LWP got a SIGTRAP.
1925
1926 Handle any tracepoint steps or hits. Return true if a tracepoint
1927 event was handled, 0 otherwise. */
1928
1929static int
1930handle_tracepoints (struct lwp_info *lwp)
1931{
1932 struct thread_info *tinfo = get_lwp_thread (lwp);
1933 int tpoint_related_event = 0;
1934
582511be
PA
1935 gdb_assert (lwp->suspended == 0);
1936
7984d532
PA
1937 /* If this tracepoint hit causes a tracing stop, we'll immediately
1938 uninsert tracepoints. To do this, we temporarily pause all
1939 threads, unpatch away, and then unpause threads. We need to make
1940 sure the unpausing doesn't resume LWP too. */
863d01bd 1941 lwp_suspended_inc (lwp);
7984d532 1942
219f2f23
PA
1943 /* And we need to be sure that any all-threads-stopping doesn't try
1944 to move threads out of the jump pads, as it could deadlock the
1945 inferior (LWP could be in the jump pad, maybe even holding the
1946 lock.) */
1947
1948 /* Do any necessary step collect actions. */
1949 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1950
fa593d66
PA
1951 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1952
219f2f23
PA
1953 /* See if we just hit a tracepoint and do its main collect
1954 actions. */
1955 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1956
863d01bd 1957 lwp_suspended_decr (lwp);
7984d532
PA
1958
1959 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1960 gdb_assert (!stabilizing_threads
1961 || (lwp->collecting_fast_tracepoint
1962 != fast_tpoint_collect_result::not_collecting));
7984d532 1963
219f2f23
PA
1964 if (tpoint_related_event)
1965 {
1966 if (debug_threads)
87ce2a04 1967 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1968 return 1;
1969 }
1970
1971 return 0;
1972}
1973
13e567af
TBA
1974fast_tpoint_collect_result
1975linux_process_target::linux_fast_tracepoint_collecting
1976 (lwp_info *lwp, fast_tpoint_collect_status *status)
fa593d66
PA
1977{
1978 CORE_ADDR thread_area;
d86d4aaf 1979 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66 1980
fa593d66
PA
1981 /* Get the thread area address. This is used to recognize which
1982 thread is which when tracing with the in-process agent library.
1983 We don't read anything from the address, and treat it as opaque;
1984 it's the address itself that we assume is unique per-thread. */
13e567af 1985 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
229d26fc 1986 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
1987
1988 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1989}
1990
13e567af
TBA
1991int
1992linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1993{
1994 return -1;
1995}
1996
d16f3f6c
TBA
1997bool
1998linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 1999{
0bfdf32f 2000 struct thread_info *saved_thread;
fa593d66 2001
0bfdf32f
GB
2002 saved_thread = current_thread;
2003 current_thread = get_lwp_thread (lwp);
fa593d66
PA
2004
2005 if ((wstat == NULL
2006 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2007 && supports_fast_tracepoints ()
58b4daa5 2008 && agent_loaded_p ())
fa593d66
PA
2009 {
2010 struct fast_tpoint_collect_status status;
fa593d66
PA
2011
2012 if (debug_threads)
87ce2a04
DE
2013 debug_printf ("Checking whether LWP %ld needs to move out of the "
2014 "jump pad.\n",
0bfdf32f 2015 lwpid_of (current_thread));
fa593d66 2016
229d26fc
SM
2017 fast_tpoint_collect_result r
2018 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
2019
2020 if (wstat == NULL
2021 || (WSTOPSIG (*wstat) != SIGILL
2022 && WSTOPSIG (*wstat) != SIGFPE
2023 && WSTOPSIG (*wstat) != SIGSEGV
2024 && WSTOPSIG (*wstat) != SIGBUS))
2025 {
2026 lwp->collecting_fast_tracepoint = r;
2027
229d26fc 2028 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2029 {
229d26fc
SM
2030 if (r == fast_tpoint_collect_result::before_insn
2031 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2032 {
2033 /* Haven't executed the original instruction yet.
2034 Set breakpoint there, and wait till it's hit,
2035 then single-step until exiting the jump pad. */
2036 lwp->exit_jump_pad_bkpt
2037 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2038 }
2039
2040 if (debug_threads)
87ce2a04
DE
2041 debug_printf ("Checking whether LWP %ld needs to move out of "
2042 "the jump pad...it does\n",
0bfdf32f
GB
2043 lwpid_of (current_thread));
2044 current_thread = saved_thread;
fa593d66 2045
d16f3f6c 2046 return true;
fa593d66
PA
2047 }
2048 }
2049 else
2050 {
2051 /* If we get a synchronous signal while collecting, *and*
2052 while executing the (relocated) original instruction,
2053 reset the PC to point at the tpoint address, before
2054 reporting to GDB. Otherwise, it's an IPA lib bug: just
2055 report the signal to GDB, and pray for the best. */
2056
229d26fc
SM
2057 lwp->collecting_fast_tracepoint
2058 = fast_tpoint_collect_result::not_collecting;
fa593d66 2059
229d26fc 2060 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2061 && (status.adjusted_insn_addr <= lwp->stop_pc
2062 && lwp->stop_pc < status.adjusted_insn_addr_end))
2063 {
2064 siginfo_t info;
2065 struct regcache *regcache;
2066
2067 /* The si_addr on a few signals references the address
2068 of the faulting instruction. Adjust that as
2069 well. */
2070 if ((WSTOPSIG (*wstat) == SIGILL
2071 || WSTOPSIG (*wstat) == SIGFPE
2072 || WSTOPSIG (*wstat) == SIGBUS
2073 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2074 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2075 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2076 /* Final check just to make sure we don't clobber
2077 the siginfo of non-kernel-sent signals. */
2078 && (uintptr_t) info.si_addr == lwp->stop_pc)
2079 {
2080 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2081 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2082 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2083 }
2084
0bfdf32f 2085 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 2086 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
2087 lwp->stop_pc = status.tpoint_addr;
2088
2089 /* Cancel any fast tracepoint lock this thread was
2090 holding. */
2091 force_unlock_trace_buffer ();
2092 }
2093
2094 if (lwp->exit_jump_pad_bkpt != NULL)
2095 {
2096 if (debug_threads)
87ce2a04
DE
2097 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2098 "stopping all threads momentarily.\n");
fa593d66
PA
2099
2100 stop_all_lwps (1, lwp);
fa593d66
PA
2101
2102 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2103 lwp->exit_jump_pad_bkpt = NULL;
2104
2105 unstop_all_lwps (1, lwp);
2106
2107 gdb_assert (lwp->suspended >= 0);
2108 }
2109 }
2110 }
2111
2112 if (debug_threads)
87ce2a04
DE
2113 debug_printf ("Checking whether LWP %ld needs to move out of the "
2114 "jump pad...no\n",
0bfdf32f 2115 lwpid_of (current_thread));
0cccb683 2116
0bfdf32f 2117 current_thread = saved_thread;
d16f3f6c 2118 return false;
fa593d66
PA
2119}
2120
2121/* Enqueue one signal in the "signals to report later when out of the
2122 jump pad" list. */
2123
2124static void
2125enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2126{
2127 struct pending_signals *p_sig;
d86d4aaf 2128 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2129
2130 if (debug_threads)
87ce2a04 2131 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2132 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2133
2134 if (debug_threads)
2135 {
2136 struct pending_signals *sig;
2137
2138 for (sig = lwp->pending_signals_to_report;
2139 sig != NULL;
2140 sig = sig->prev)
87ce2a04
DE
2141 debug_printf (" Already queued %d\n",
2142 sig->signal);
fa593d66 2143
87ce2a04 2144 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2145 }
2146
1a981360
PA
2147 /* Don't enqueue non-RT signals if they are already in the deferred
2148 queue. (SIGSTOP being the easiest signal to see ending up here
2149 twice) */
2150 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2151 {
2152 struct pending_signals *sig;
2153
2154 for (sig = lwp->pending_signals_to_report;
2155 sig != NULL;
2156 sig = sig->prev)
2157 {
2158 if (sig->signal == WSTOPSIG (*wstat))
2159 {
2160 if (debug_threads)
87ce2a04
DE
2161 debug_printf ("Not requeuing already queued non-RT signal %d"
2162 " for LWP %ld\n",
2163 sig->signal,
d86d4aaf 2164 lwpid_of (thread));
1a981360
PA
2165 return;
2166 }
2167 }
2168 }
2169
8d749320 2170 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2171 p_sig->prev = lwp->pending_signals_to_report;
2172 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2173
d86d4aaf 2174 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2175 &p_sig->info);
fa593d66
PA
2176
2177 lwp->pending_signals_to_report = p_sig;
2178}
2179
2180/* Dequeue one signal from the "signals to report later when out of
2181 the jump pad" list. */
2182
2183static int
2184dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2185{
d86d4aaf
DE
2186 struct thread_info *thread = get_lwp_thread (lwp);
2187
fa593d66
PA
2188 if (lwp->pending_signals_to_report != NULL)
2189 {
2190 struct pending_signals **p_sig;
2191
2192 p_sig = &lwp->pending_signals_to_report;
2193 while ((*p_sig)->prev != NULL)
2194 p_sig = &(*p_sig)->prev;
2195
2196 *wstat = W_STOPCODE ((*p_sig)->signal);
2197 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2198 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2199 &(*p_sig)->info);
fa593d66
PA
2200 free (*p_sig);
2201 *p_sig = NULL;
2202
2203 if (debug_threads)
87ce2a04 2204 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2205 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2206
2207 if (debug_threads)
2208 {
2209 struct pending_signals *sig;
2210
2211 for (sig = lwp->pending_signals_to_report;
2212 sig != NULL;
2213 sig = sig->prev)
87ce2a04
DE
2214 debug_printf (" Still queued %d\n",
2215 sig->signal);
fa593d66 2216
87ce2a04 2217 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2218 }
2219
2220 return 1;
2221 }
2222
2223 return 0;
2224}
2225
ac1bbaca
TBA
2226bool
2227linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2228{
ac1bbaca
TBA
2229 struct thread_info *saved_thread = current_thread;
2230 current_thread = get_lwp_thread (child);
d50171e4 2231
ac1bbaca
TBA
2232 if (low_stopped_by_watchpoint ())
2233 {
2234 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2235 child->stopped_data_address = low_stopped_data_address ();
2236 }
582511be 2237
ac1bbaca 2238 current_thread = saved_thread;
582511be 2239
ac1bbaca
TBA
2240 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2241}
d50171e4 2242
ac1bbaca
TBA
2243bool
2244linux_process_target::low_stopped_by_watchpoint ()
2245{
2246 return false;
2247}
d50171e4 2248
ac1bbaca
TBA
2249CORE_ADDR
2250linux_process_target::low_stopped_data_address ()
2251{
2252 return 0;
c4d9ceb6
YQ
2253}
2254
de0d863e
DB
2255/* Return the ptrace options that we want to try to enable. */
2256
2257static int
2258linux_low_ptrace_options (int attached)
2259{
c12a5089 2260 client_state &cs = get_client_state ();
de0d863e
DB
2261 int options = 0;
2262
2263 if (!attached)
2264 options |= PTRACE_O_EXITKILL;
2265
c12a5089 2266 if (cs.report_fork_events)
de0d863e
DB
2267 options |= PTRACE_O_TRACEFORK;
2268
c12a5089 2269 if (cs.report_vfork_events)
c269dbdb
DB
2270 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2271
c12a5089 2272 if (cs.report_exec_events)
94585166
DB
2273 options |= PTRACE_O_TRACEEXEC;
2274
82075af2
JS
2275 options |= PTRACE_O_TRACESYSGOOD;
2276
de0d863e
DB
2277 return options;
2278}
2279
d16f3f6c
TBA
2280lwp_info *
2281linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38 2282{
c12a5089 2283 client_state &cs = get_client_state ();
fa96cb38
PA
2284 struct lwp_info *child;
2285 struct thread_info *thread;
582511be 2286 int have_stop_pc = 0;
fa96cb38 2287
f2907e49 2288 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2289
94585166
DB
2290 /* Check for stop events reported by a process we didn't already
2291 know about - anything not already in our LWP list.
2292
2293 If we're expecting to receive stopped processes after
2294 fork, vfork, and clone events, then we'll just add the
2295 new one to our list and go back to waiting for the event
2296 to be reported - the stopped process might be returned
2297 from waitpid before or after the event is.
2298
2299 But note the case of a non-leader thread exec'ing after the
2300 leader having exited, and gone from our lists (because
2301 check_zombie_leaders deleted it). The non-leader thread
2302 changes its tid to the tgid. */
2303
2304 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2305 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2306 {
2307 ptid_t child_ptid;
2308
2309 /* A multi-thread exec after we had seen the leader exiting. */
2310 if (debug_threads)
2311 {
2312 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2313 "after exec.\n", lwpid);
2314 }
2315
fd79271b 2316 child_ptid = ptid_t (lwpid, lwpid, 0);
94585166
DB
2317 child = add_lwp (child_ptid);
2318 child->stopped = 1;
2319 current_thread = child->thread;
2320 }
2321
fa96cb38
PA
2322 /* If we didn't find a process, one of two things presumably happened:
2323 - A process we started and then detached from has exited. Ignore it.
2324 - A process we are controlling has forked and the new child's stop
2325 was reported to us by the kernel. Save its PID. */
2326 if (child == NULL && WIFSTOPPED (wstat))
2327 {
2328 add_to_pid_list (&stopped_pids, lwpid, wstat);
2329 return NULL;
2330 }
2331 else if (child == NULL)
2332 return NULL;
2333
2334 thread = get_lwp_thread (child);
2335
2336 child->stopped = 1;
2337
2338 child->last_status = wstat;
2339
582511be
PA
2340 /* Check if the thread has exited. */
2341 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2342 {
2343 if (debug_threads)
2344 debug_printf ("LLFE: %d exited.\n", lwpid);
f50bf8e5
YQ
2345
2346 if (finish_step_over (child))
2347 {
2348 /* Unsuspend all other LWPs, and set them back running again. */
2349 unsuspend_all_lwps (child);
2350 }
2351
65706a29
PA
2352 /* If there is at least one more LWP, then the exit signal was
2353 not the end of the debugged application and should be
2354 ignored, unless GDB wants to hear about thread exits. */
c12a5089 2355 if (cs.report_thread_events
65706a29 2356 || last_thread_of_process_p (pid_of (thread)))
582511be 2357 {
65706a29
PA
2358 /* Since events are serialized to GDB core, and we can't
2359 report this one right now. Leave the status pending for
2360 the next time we're able to report it. */
2361 mark_lwp_dead (child, wstat);
2362 return child;
582511be
PA
2363 }
2364 else
2365 {
65706a29
PA
2366 delete_lwp (child);
2367 return NULL;
582511be
PA
2368 }
2369 }
2370
2371 gdb_assert (WIFSTOPPED (wstat));
2372
fa96cb38
PA
2373 if (WIFSTOPPED (wstat))
2374 {
2375 struct process_info *proc;
2376
c06cbd92 2377 /* Architecture-specific setup after inferior is running. */
fa96cb38 2378 proc = find_process_pid (pid_of (thread));
c06cbd92 2379 if (proc->tdesc == NULL)
fa96cb38 2380 {
c06cbd92
YQ
2381 if (proc->attached)
2382 {
c06cbd92
YQ
2383 /* This needs to happen after we have attached to the
2384 inferior and it is stopped for the first time, but
2385 before we access any inferior registers. */
797bcff5 2386 arch_setup_thread (thread);
c06cbd92
YQ
2387 }
2388 else
2389 {
2390 /* The process is started, but GDBserver will do
2391 architecture-specific setup after the program stops at
2392 the first instruction. */
2393 child->status_pending_p = 1;
2394 child->status_pending = wstat;
2395 return child;
2396 }
fa96cb38
PA
2397 }
2398 }
2399
fa96cb38
PA
2400 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2401 {
beed38b8 2402 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2403 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2404
de0d863e 2405 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2406 child->must_set_ptrace_flags = 0;
2407 }
2408
82075af2
JS
2409 /* Always update syscall_state, even if it will be filtered later. */
2410 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2411 {
2412 child->syscall_state
2413 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2414 ? TARGET_WAITKIND_SYSCALL_RETURN
2415 : TARGET_WAITKIND_SYSCALL_ENTRY);
2416 }
2417 else
2418 {
2419 /* Almost all other ptrace-stops are known to be outside of system
2420 calls, with further exceptions in handle_extended_wait. */
2421 child->syscall_state = TARGET_WAITKIND_IGNORE;
2422 }
2423
e7ad2f14
PA
2424 /* Be careful to not overwrite stop_pc until save_stop_reason is
2425 called. */
fa96cb38 2426 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2427 && linux_is_extended_waitstatus (wstat))
fa96cb38 2428 {
582511be 2429 child->stop_pc = get_pc (child);
94585166 2430 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2431 {
2432 /* The event has been handled, so just return without
2433 reporting it. */
2434 return NULL;
2435 }
fa96cb38
PA
2436 }
2437
80aea927 2438 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2439 {
e7ad2f14 2440 if (save_stop_reason (child))
582511be
PA
2441 have_stop_pc = 1;
2442 }
2443
2444 if (!have_stop_pc)
2445 child->stop_pc = get_pc (child);
2446
fa96cb38
PA
2447 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2448 && child->stop_expected)
2449 {
2450 if (debug_threads)
2451 debug_printf ("Expected stop.\n");
2452 child->stop_expected = 0;
2453
2454 if (thread->last_resume_kind == resume_stop)
2455 {
2456 /* We want to report the stop to the core. Treat the
2457 SIGSTOP as a normal event. */
2bf6fb9d
PA
2458 if (debug_threads)
2459 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2460 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2461 }
2462 else if (stopping_threads != NOT_STOPPING_THREADS)
2463 {
2464 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2465 pending. */
2bf6fb9d
PA
2466 if (debug_threads)
2467 debug_printf ("LLW: SIGSTOP caught for %s "
2468 "while stopping threads.\n",
2469 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2470 return NULL;
2471 }
2472 else
2473 {
2bf6fb9d
PA
2474 /* This is a delayed SIGSTOP. Filter out the event. */
2475 if (debug_threads)
2476 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2477 child->stepping ? "step" : "continue",
2478 target_pid_to_str (ptid_of (thread)));
2479
df95181f 2480 resume_one_lwp (child, child->stepping, 0, NULL);
fa96cb38
PA
2481 return NULL;
2482 }
2483 }
2484
582511be
PA
2485 child->status_pending_p = 1;
2486 child->status_pending = wstat;
fa96cb38
PA
2487 return child;
2488}
2489
b31cdfa6
TBA
2490bool
2491linux_process_target::maybe_hw_step (thread_info *thread)
f79b145d 2492{
b31cdfa6
TBA
2493 if (supports_hardware_single_step ())
2494 return true;
f79b145d
YQ
2495 else
2496 {
3b9a79ef 2497 /* GDBserver must insert single-step breakpoint for software
f79b145d 2498 single step. */
3b9a79ef 2499 gdb_assert (has_single_step_breakpoints (thread));
b31cdfa6 2500 return false;
f79b145d
YQ
2501 }
2502}
2503
df95181f
TBA
2504void
2505linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2506{
20ba1ce6
PA
2507 struct lwp_info *lp = get_thread_lwp (thread);
2508
2509 if (lp->stopped
863d01bd 2510 && !lp->suspended
20ba1ce6 2511 && !lp->status_pending_p
20ba1ce6
PA
2512 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2513 {
8901d193
YQ
2514 int step = 0;
2515
2516 if (thread->last_resume_kind == resume_step)
2517 step = maybe_hw_step (thread);
20ba1ce6
PA
2518
2519 if (debug_threads)
2520 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2521 target_pid_to_str (ptid_of (thread)),
2522 paddress (lp->stop_pc),
2523 step);
2524
df95181f 2525 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2526 }
2527}
2528
d16f3f6c
TBA
2529int
2530linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2531 ptid_t filter_ptid,
2532 int *wstatp, int options)
0d62e5e8 2533{
d86d4aaf 2534 struct thread_info *event_thread;
d50171e4 2535 struct lwp_info *event_child, *requested_child;
fa96cb38 2536 sigset_t block_mask, prev_mask;
d50171e4 2537
fa96cb38 2538 retry:
d86d4aaf
DE
2539 /* N.B. event_thread points to the thread_info struct that contains
2540 event_child. Keep them in sync. */
2541 event_thread = NULL;
d50171e4
PA
2542 event_child = NULL;
2543 requested_child = NULL;
0d62e5e8 2544
95954743 2545 /* Check for a lwp with a pending status. */
bd99dc85 2546
d7e15655 2547 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2548 {
83e1b6c1
SM
2549 event_thread = find_thread_in_random ([&] (thread_info *thread)
2550 {
2551 return status_pending_p_callback (thread, filter_ptid);
2552 });
2553
d86d4aaf
DE
2554 if (event_thread != NULL)
2555 event_child = get_thread_lwp (event_thread);
2556 if (debug_threads && event_thread)
2557 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2558 }
d7e15655 2559 else if (filter_ptid != null_ptid)
0d62e5e8 2560 {
fa96cb38 2561 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2562
bde24c0a 2563 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2564 && requested_child->status_pending_p
229d26fc
SM
2565 && (requested_child->collecting_fast_tracepoint
2566 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2567 {
2568 enqueue_one_deferred_signal (requested_child,
2569 &requested_child->status_pending);
2570 requested_child->status_pending_p = 0;
2571 requested_child->status_pending = 0;
df95181f 2572 resume_one_lwp (requested_child, 0, 0, NULL);
fa593d66
PA
2573 }
2574
2575 if (requested_child->suspended
2576 && requested_child->status_pending_p)
38e08fca
GB
2577 {
2578 internal_error (__FILE__, __LINE__,
2579 "requesting an event out of a"
2580 " suspended child?");
2581 }
fa593d66 2582
d50171e4 2583 if (requested_child->status_pending_p)
d86d4aaf
DE
2584 {
2585 event_child = requested_child;
2586 event_thread = get_lwp_thread (event_child);
2587 }
0d62e5e8 2588 }
611cb4a5 2589
0d62e5e8
DJ
2590 if (event_child != NULL)
2591 {
bd99dc85 2592 if (debug_threads)
87ce2a04 2593 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2594 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2595 *wstatp = event_child->status_pending;
bd99dc85
PA
2596 event_child->status_pending_p = 0;
2597 event_child->status_pending = 0;
0bfdf32f 2598 current_thread = event_thread;
d86d4aaf 2599 return lwpid_of (event_thread);
0d62e5e8
DJ
2600 }
2601
fa96cb38
PA
2602 /* But if we don't find a pending event, we'll have to wait.
2603
2604 We only enter this loop if no process has a pending wait status.
2605 Thus any action taken in response to a wait status inside this
2606 loop is responding as soon as we detect the status, not after any
2607 pending events. */
d8301ad1 2608
fa96cb38
PA
2609 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2610 all signals while here. */
2611 sigfillset (&block_mask);
21987b9c 2612 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2613
582511be
PA
2614 /* Always pull all events out of the kernel. We'll randomly select
2615 an event LWP out of all that have events, to prevent
2616 starvation. */
fa96cb38 2617 while (event_child == NULL)
0d62e5e8 2618 {
fa96cb38 2619 pid_t ret = 0;
0d62e5e8 2620
fa96cb38
PA
2621 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2622 quirks:
0d62e5e8 2623
fa96cb38
PA
2624 - If the thread group leader exits while other threads in the
2625 thread group still exist, waitpid(TGID, ...) hangs. That
2626 waitpid won't return an exit status until the other threads
2627 in the group are reaped.
611cb4a5 2628
fa96cb38
PA
2629 - When a non-leader thread execs, that thread just vanishes
2630 without reporting an exit (so we'd hang if we waited for it
2631 explicitly in that case). The exec event is reported to
94585166 2632 the TGID pid. */
fa96cb38
PA
2633 errno = 0;
2634 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2635
fa96cb38
PA
2636 if (debug_threads)
2637 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
6d91ce9a 2638 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2639
fa96cb38 2640 if (ret > 0)
0d62e5e8 2641 {
89be2091 2642 if (debug_threads)
bd99dc85 2643 {
fa96cb38
PA
2644 debug_printf ("LLW: waitpid %ld received %s\n",
2645 (long) ret, status_to_str (*wstatp));
bd99dc85 2646 }
89be2091 2647
582511be
PA
2648 /* Filter all events. IOW, leave all events pending. We'll
2649 randomly select an event LWP out of all that have events
2650 below. */
d16f3f6c 2651 filter_event (ret, *wstatp);
fa96cb38
PA
2652 /* Retry until nothing comes out of waitpid. A single
2653 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2654 continue;
2655 }
2656
20ba1ce6
PA
2657 /* Now that we've pulled all events out of the kernel, resume
2658 LWPs that don't have an interesting event to report. */
2659 if (stopping_threads == NOT_STOPPING_THREADS)
df95181f
TBA
2660 for_each_thread ([this] (thread_info *thread)
2661 {
2662 resume_stopped_resumed_lwps (thread);
2663 });
20ba1ce6
PA
2664
2665 /* ... and find an LWP with a status to report to the core, if
2666 any. */
83e1b6c1
SM
2667 event_thread = find_thread_in_random ([&] (thread_info *thread)
2668 {
2669 return status_pending_p_callback (thread, filter_ptid);
2670 });
2671
582511be
PA
2672 if (event_thread != NULL)
2673 {
2674 event_child = get_thread_lwp (event_thread);
2675 *wstatp = event_child->status_pending;
2676 event_child->status_pending_p = 0;
2677 event_child->status_pending = 0;
2678 break;
2679 }
2680
fa96cb38
PA
2681 /* Check for zombie thread group leaders. Those can't be reaped
2682 until all other threads in the thread group are. */
2683 check_zombie_leaders ();
2684
a1385b7b
SM
2685 auto not_stopped = [&] (thread_info *thread)
2686 {
2687 return not_stopped_callback (thread, wait_ptid);
2688 };
2689
fa96cb38
PA
2690 /* If there are no resumed children left in the set of LWPs we
2691 want to wait for, bail. We can't just block in
2692 waitpid/sigsuspend, because lwps might have been left stopped
2693 in trace-stop state, and we'd be stuck forever waiting for
2694 their status to change (which would only happen if we resumed
2695 them). Even if WNOHANG is set, this return code is preferred
2696 over 0 (below), as it is more detailed. */
a1385b7b 2697 if (find_thread (not_stopped) == NULL)
a6dbe5df 2698 {
fa96cb38
PA
2699 if (debug_threads)
2700 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
21987b9c 2701 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2702 return -1;
a6dbe5df
PA
2703 }
2704
fa96cb38
PA
2705 /* No interesting event to report to the caller. */
2706 if ((options & WNOHANG))
24a09b5f 2707 {
fa96cb38
PA
2708 if (debug_threads)
2709 debug_printf ("WNOHANG set, no event found\n");
2710
21987b9c 2711 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2712 return 0;
24a09b5f
DJ
2713 }
2714
fa96cb38
PA
2715 /* Block until we get an event reported with SIGCHLD. */
2716 if (debug_threads)
2717 debug_printf ("sigsuspend'ing\n");
d50171e4 2718
fa96cb38 2719 sigsuspend (&prev_mask);
21987b9c 2720 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2721 goto retry;
2722 }
d50171e4 2723
21987b9c 2724 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2725
0bfdf32f 2726 current_thread = event_thread;
d50171e4 2727
fa96cb38
PA
2728 return lwpid_of (event_thread);
2729}
2730
d16f3f6c
TBA
2731int
2732linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2733{
d16f3f6c 2734 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2735}
2736
6bf5e0ba
PA
2737/* Select one LWP out of those that have events pending. */
2738
2739static void
2740select_event_lwp (struct lwp_info **orig_lp)
2741{
582511be
PA
2742 struct thread_info *event_thread = NULL;
2743
2744 /* In all-stop, give preference to the LWP that is being
2745 single-stepped. There will be at most one, and it's the LWP that
2746 the core is most interested in. If we didn't do this, then we'd
2747 have to handle pending step SIGTRAPs somehow in case the core
2748 later continues the previously-stepped thread, otherwise we'd
2749 report the pending SIGTRAP, and the core, not having stepped the
2750 thread, wouldn't understand what the trap was for, and therefore
2751 would report it to the user as a random signal. */
2752 if (!non_stop)
6bf5e0ba 2753 {
39a64da5
SM
2754 event_thread = find_thread ([] (thread_info *thread)
2755 {
2756 lwp_info *lp = get_thread_lwp (thread);
2757
2758 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2759 && thread->last_resume_kind == resume_step
2760 && lp->status_pending_p);
2761 });
2762
582511be
PA
2763 if (event_thread != NULL)
2764 {
2765 if (debug_threads)
2766 debug_printf ("SEL: Select single-step %s\n",
2767 target_pid_to_str (ptid_of (event_thread)));
2768 }
6bf5e0ba 2769 }
582511be 2770 if (event_thread == NULL)
6bf5e0ba
PA
2771 {
2772 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2773 which have had events. */
6bf5e0ba 2774
b0319eaa 2775 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2776 {
2777 lwp_info *lp = get_thread_lwp (thread);
2778
b0319eaa
TT
2779 /* Only resumed LWPs that have an event pending. */
2780 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2781 && lp->status_pending_p);
39a64da5 2782 });
6bf5e0ba
PA
2783 }
2784
d86d4aaf 2785 if (event_thread != NULL)
6bf5e0ba 2786 {
d86d4aaf
DE
2787 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2788
6bf5e0ba
PA
2789 /* Switch the event LWP. */
2790 *orig_lp = event_lp;
2791 }
2792}
2793
7984d532
PA
2794/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2795 NULL. */
2796
2797static void
2798unsuspend_all_lwps (struct lwp_info *except)
2799{
139720c5
SM
2800 for_each_thread ([&] (thread_info *thread)
2801 {
2802 lwp_info *lwp = get_thread_lwp (thread);
2803
2804 if (lwp != except)
2805 lwp_suspended_decr (lwp);
2806 });
7984d532
PA
2807}
2808
5a6b0a41 2809static bool lwp_running (thread_info *thread);
fa593d66
PA
2810
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already hopelessly wedged inside a jump pad,
     stabilization cannot make progress; bail out early.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
			       {
				 return stuck_in_jump_pad (thread);
			       });

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until unsuspend_all_lwps below.  */
	  lwp_suspended_inc (lwp);

	  /* Any real signal (or an explicit stop request) is parked
	     as a deferred signal so it is re-delivered once the
	     thread is safely out of the jump pad.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Sanity check: after stabilizing, no thread should remain
	 stuck in a jump pad.  */
      thread_stuck = find_thread ([this] (thread_info *thread)
		       {
			 return stuck_in_jump_pad (thread);
		       });

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2913
582511be
PA
2914/* Convenience function that is called when the kernel reports an
2915 event that is not passed out to GDB. */
2916
2917static ptid_t
2918ignore_event (struct target_waitstatus *ourstatus)
2919{
2920 /* If we got an event, there may still be others, as a single
2921 SIGCHLD can indicate more than one child stopped. This forces
2922 another target_wait call. */
2923 async_file_mark ();
2924
2925 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2926 return null_ptid;
2927}
2928
fd000fb3
TBA
/* Filter an exit event for EVENT_CHILD.  If the exiting thread is
   not the last thread of its process, downgrade the event: report it
   as TARGET_WAITKIND_THREAD_EXITED when the client asked for thread
   events, or swallow it (TARGET_WAITKIND_IGNORE) otherwise, and
   delete the LWP.  Returns the ptid of the thread the event belongs
   to.  */

ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  client_state &cs = get_client_state ();
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = ptid_of (thread);

  if (!last_thread_of_process_p (pid_of (thread)))
    {
      /* Not the whole-process exit; only the client's
	 report_thread_events setting decides whether GDB hears about
	 it.  */
      if (cs.report_thread_events)
	ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
      else
	ourstatus->kind = TARGET_WAITKIND_IGNORE;

      delete_lwp (event_child);
    }
  return ptid;
}
2948
82075af2
JS
2949/* Returns 1 if GDB is interested in any event_child syscalls. */
2950
2951static int
2952gdb_catching_syscalls_p (struct lwp_info *event_child)
2953{
2954 struct thread_info *thread = get_lwp_thread (event_child);
2955 struct process_info *proc = get_thread_process (thread);
2956
f27866ba 2957 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2958}
2959
9eedd27d
TBA
2960bool
2961linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
82075af2 2962{
4cc32bec 2963 int sysno;
82075af2
JS
2964 struct thread_info *thread = get_lwp_thread (event_child);
2965 struct process_info *proc = get_thread_process (thread);
2966
f27866ba 2967 if (proc->syscalls_to_catch.empty ())
9eedd27d 2968 return false;
82075af2 2969
f27866ba 2970 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
9eedd27d 2971 return true;
82075af2 2972
4cc32bec 2973 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2974
2975 for (int iter : proc->syscalls_to_catch)
82075af2 2976 if (iter == sysno)
9eedd27d 2977 return true;
82075af2 2978
9eedd27d 2979 return false;
82075af2
JS
2980}
2981
d16f3f6c
TBA
/* Workhorse of the wait target method.  Wait for an event from any
   resumed LWP matching PTID, translate it into *OURSTATUS and return
   the ptid of the thread the event is for (null_ptid if nothing is
   reportable).  TARGET_OPTIONS may include TARGET_WNOHANG for a
   polling wait.  Events that are internal to gdbserver (its own
   breakpoints, ignored signals, tracepoint bookkeeping, jump-pad
   stabilization) are consumed here and never reported to GDB.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      int target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  A thread with a pending status or
     one that is not stopped counts as resumed.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is in flight, only wait for the stepping LWP,
     and always block (mask out WNOHANG).  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we're be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3714
3715/* Get rid of any pending event in the pipe. */
3716static void
3717async_file_flush (void)
3718{
3719 int ret;
3720 char buf;
3721
3722 do
3723 ret = read (linux_event_pipe[0], &buf, 1);
3724 while (ret >= 0 || (ret == -1 && errno == EINTR));
3725}
3726
3727/* Put something in the pipe, so the event loop wakes up. */
3728static void
3729async_file_mark (void)
3730{
3731 int ret;
3732
3733 async_file_flush ();
3734
3735 do
3736 ret = write (linux_event_pipe[1], "+", 1);
3737 while (ret == 0 || (ret == -1 && errno == EINTR));
3738
3739 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3740 be awakened anyway. */
3741}
3742
6532e7e3
TBA
3743ptid_t
3744linux_process_target::wait (ptid_t ptid,
3745 target_waitstatus *ourstatus,
3746 int target_options)
bd99dc85 3747{
95954743 3748 ptid_t event_ptid;
bd99dc85 3749
bd99dc85
PA
3750 /* Flush the async file first. */
3751 if (target_is_async_p ())
3752 async_file_flush ();
3753
582511be
PA
3754 do
3755 {
d16f3f6c 3756 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3757 }
3758 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3759 && event_ptid == null_ptid
582511be 3760 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3761
3762 /* If at least one stop was reported, there may be more. A single
3763 SIGCHLD can signal more than one child stop. */
3764 if (target_is_async_p ()
3765 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3766 && event_ptid != null_ptid)
bd99dc85
PA
3767 async_file_mark ();
3768
3769 return event_ptid;
da6d8c04
DJ
3770}
3771
c5f62d5f 3772/* Send a signal to an LWP. */
fd500816
DJ
3773
3774static int
a1928bad 3775kill_lwp (unsigned long lwpid, int signo)
fd500816 3776{
4a6ed09b 3777 int ret;
fd500816 3778
4a6ed09b
PA
3779 errno = 0;
3780 ret = syscall (__NR_tkill, lwpid, signo);
3781 if (errno == ENOSYS)
3782 {
3783 /* If tkill fails, then we are not using nptl threads, a
3784 configuration we no longer support. */
3785 perror_with_name (("tkill"));
3786 }
3787 return ret;
fd500816
DJ
3788}
3789
/* Request that LWP stop.  Simply forwards to send_sigstop, which
   delivers a SIGSTOP to the LWP (unless one is already pending).  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3795
0d62e5e8 3796static void
02fc4de7 3797send_sigstop (struct lwp_info *lwp)
0d62e5e8 3798{
bd99dc85 3799 int pid;
0d62e5e8 3800
d86d4aaf 3801 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3802
0d62e5e8
DJ
3803 /* If we already have a pending stop signal for this process, don't
3804 send another. */
54a0b537 3805 if (lwp->stop_expected)
0d62e5e8 3806 {
ae13219e 3807 if (debug_threads)
87ce2a04 3808 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3809
0d62e5e8
DJ
3810 return;
3811 }
3812
3813 if (debug_threads)
87ce2a04 3814 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3815
d50171e4 3816 lwp->stop_expected = 1;
bd99dc85 3817 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3818}
3819
df3e4dbe
SM
3820static void
3821send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3822{
d86d4aaf 3823 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3824
7984d532
PA
3825 /* Ignore EXCEPT. */
3826 if (lwp == except)
df3e4dbe 3827 return;
7984d532 3828
02fc4de7 3829 if (lwp->stopped)
df3e4dbe 3830 return;
02fc4de7
PA
3831
3832 send_sigstop (lwp);
7984d532
PA
3833}
3834
3835/* Increment the suspend count of an LWP, and stop it, if not stopped
3836 yet. */
df3e4dbe
SM
3837static void
3838suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3839{
d86d4aaf 3840 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3841
3842 /* Ignore EXCEPT. */
3843 if (lwp == except)
df3e4dbe 3844 return;
7984d532 3845
863d01bd 3846 lwp_suspended_inc (lwp);
7984d532 3847
df3e4dbe 3848 send_sigstop (thread, except);
02fc4de7
PA
3849}
3850
95954743
PA
3851static void
3852mark_lwp_dead (struct lwp_info *lwp, int wstat)
3853{
95954743
PA
3854 /* Store the exit status for later. */
3855 lwp->status_pending_p = 1;
3856 lwp->status_pending = wstat;
3857
00db26fa
PA
3858 /* Store in waitstatus as well, as there's nothing else to process
3859 for this event. */
3860 if (WIFEXITED (wstat))
3861 {
3862 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3863 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3864 }
3865 else if (WIFSIGNALED (wstat))
3866 {
3867 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3868 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3869 }
3870
95954743
PA
3871 /* Prevent trying to stop it. */
3872 lwp->stopped = 1;
3873
3874 /* No further stops are expected from a dead lwp. */
3875 lwp->stop_expected = 0;
3876}
3877
00db26fa
PA
3878/* Return true if LWP has exited already, and has a pending exit event
3879 to report to GDB. */
3880
3881static int
3882lwp_is_marked_dead (struct lwp_info *lwp)
3883{
3884 return (lwp->status_pending_p
3885 && (WIFEXITED (lwp->status_pending)
3886 || WIFSIGNALED (lwp->status_pending)));
3887}
3888
d16f3f6c
TBA
3889void
3890linux_process_target::wait_for_sigstop ()
0d62e5e8 3891{
0bfdf32f 3892 struct thread_info *saved_thread;
95954743 3893 ptid_t saved_tid;
fa96cb38
PA
3894 int wstat;
3895 int ret;
0d62e5e8 3896
0bfdf32f
GB
3897 saved_thread = current_thread;
3898 if (saved_thread != NULL)
9c80ecd6 3899 saved_tid = saved_thread->id;
bd99dc85 3900 else
95954743 3901 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3902
d50171e4 3903 if (debug_threads)
fa96cb38 3904 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3905
fa96cb38
PA
3906 /* Passing NULL_PTID as filter indicates we want all events to be
3907 left pending. Eventually this returns when there are no
3908 unwaited-for children left. */
d16f3f6c 3909 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3910 gdb_assert (ret == -1);
0d62e5e8 3911
13d3d99b 3912 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 3913 current_thread = saved_thread;
0d62e5e8
DJ
3914 else
3915 {
3916 if (debug_threads)
87ce2a04 3917 debug_printf ("Previously current thread died.\n");
0d62e5e8 3918
f0db101d
PA
3919 /* We can't change the current inferior behind GDB's back,
3920 otherwise, a subsequent command may apply to the wrong
3921 process. */
3922 current_thread = NULL;
0d62e5e8
DJ
3923 }
3924}
3925
13e567af
TBA
3926bool
3927linux_process_target::stuck_in_jump_pad (thread_info *thread)
fa593d66 3928{
d86d4aaf 3929 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3930
863d01bd
PA
3931 if (lwp->suspended != 0)
3932 {
3933 internal_error (__FILE__, __LINE__,
3934 "LWP %ld is suspended, suspended=%d\n",
3935 lwpid_of (thread), lwp->suspended);
3936 }
fa593d66
PA
3937 gdb_assert (lwp->stopped);
3938
3939 /* Allow debugging the jump pad, gdb_collect, etc.. */
3940 return (supports_fast_tracepoints ()
58b4daa5 3941 && agent_loaded_p ()
fa593d66 3942 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3943 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3944 || thread->last_resume_kind == resume_step)
229d26fc
SM
3945 && (linux_fast_tracepoint_collecting (lwp, NULL)
3946 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3947}
3948
d16f3f6c
TBA
3949void
3950linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3951{
f0ce0d3a 3952 struct thread_info *saved_thread;
d86d4aaf 3953 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3954 int *wstat;
3955
863d01bd
PA
3956 if (lwp->suspended != 0)
3957 {
3958 internal_error (__FILE__, __LINE__,
3959 "LWP %ld is suspended, suspended=%d\n",
3960 lwpid_of (thread), lwp->suspended);
3961 }
fa593d66
PA
3962 gdb_assert (lwp->stopped);
3963
f0ce0d3a
PA
3964 /* For gdb_breakpoint_here. */
3965 saved_thread = current_thread;
3966 current_thread = thread;
3967
fa593d66
PA
3968 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3969
3970 /* Allow debugging the jump pad, gdb_collect, etc. */
3971 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3972 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3973 && thread->last_resume_kind != resume_step
3974 && maybe_move_out_of_jump_pad (lwp, wstat))
3975 {
3976 if (debug_threads)
87ce2a04 3977 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3978 lwpid_of (thread));
fa593d66
PA
3979
3980 if (wstat)
3981 {
3982 lwp->status_pending_p = 0;
3983 enqueue_one_deferred_signal (lwp, wstat);
3984
3985 if (debug_threads)
87ce2a04
DE
3986 debug_printf ("Signal %d for LWP %ld deferred "
3987 "(in jump pad)\n",
d86d4aaf 3988 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3989 }
3990
df95181f 3991 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
3992 }
3993 else
863d01bd 3994 lwp_suspended_inc (lwp);
f0ce0d3a
PA
3995
3996 current_thread = saved_thread;
fa593d66
PA
3997}
3998
5a6b0a41
SM
3999static bool
4000lwp_running (thread_info *thread)
fa593d66 4001{
d86d4aaf 4002 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4003
00db26fa 4004 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4005 return false;
4006
4007 return !lwp->stopped;
fa593d66
PA
4008}
4009
d16f3f6c
TBA
4010void
4011linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 4012{
bde24c0a
PA
4013 /* Should not be called recursively. */
4014 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4015
87ce2a04
DE
4016 if (debug_threads)
4017 {
4018 debug_enter ();
4019 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4020 suspend ? "stop-and-suspend" : "stop",
4021 except != NULL
d86d4aaf 4022 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
4023 : "none");
4024 }
4025
bde24c0a
PA
4026 stopping_threads = (suspend
4027 ? STOPPING_AND_SUSPENDING_THREADS
4028 : STOPPING_THREADS);
7984d532
PA
4029
4030 if (suspend)
df3e4dbe
SM
4031 for_each_thread ([&] (thread_info *thread)
4032 {
4033 suspend_and_send_sigstop (thread, except);
4034 });
7984d532 4035 else
df3e4dbe
SM
4036 for_each_thread ([&] (thread_info *thread)
4037 {
4038 send_sigstop (thread, except);
4039 });
4040
fa96cb38 4041 wait_for_sigstop ();
bde24c0a 4042 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4043
4044 if (debug_threads)
4045 {
4046 debug_printf ("stop_all_lwps done, setting stopping_threads "
4047 "back to !stopping\n");
4048 debug_exit ();
4049 }
0d62e5e8
DJ
4050}
4051
863d01bd
PA
4052/* Enqueue one signal in the chain of signals which need to be
4053 delivered to this process on next resume. */
4054
4055static void
4056enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4057{
8d749320 4058 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4059
863d01bd
PA
4060 p_sig->prev = lwp->pending_signals;
4061 p_sig->signal = signal;
4062 if (info == NULL)
4063 memset (&p_sig->info, 0, sizeof (siginfo_t));
4064 else
4065 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4066 lwp->pending_signals = p_sig;
4067}
4068
df95181f
TBA
4069void
4070linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4071{
984a2c04
YQ
4072 struct thread_info *thread = get_lwp_thread (lwp);
4073 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4074
4075 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4076
984a2c04 4077 current_thread = thread;
7582c77c 4078 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
fa5308bd 4079
a0ff9e1a 4080 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4081 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4082}
4083
df95181f
TBA
4084int
4085linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4086{
4087 int step = 0;
4088
b31cdfa6 4089 if (supports_hardware_single_step ())
7fe5e27e
AT
4090 {
4091 step = 1;
4092 }
7582c77c 4093 else if (supports_software_single_step ())
7fe5e27e
AT
4094 {
4095 install_software_single_step_breakpoints (lwp);
4096 step = 0;
4097 }
4098 else
4099 {
4100 if (debug_threads)
4101 debug_printf ("stepping is not implemented on this target");
4102 }
4103
4104 return step;
4105}
4106
35ac8b3e 4107/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4108 finish a fast tracepoint collect. Since signal can be delivered in
4109 the step-over, the program may go to signal handler and trap again
4110 after return from the signal handler. We can live with the spurious
4111 double traps. */
35ac8b3e
YQ
4112
4113static int
4114lwp_signal_can_be_delivered (struct lwp_info *lwp)
4115{
229d26fc
SM
4116 return (lwp->collecting_fast_tracepoint
4117 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4118}
4119
/* Resume LWP: single-step if STEP is nonzero, otherwise continue
   (possibly via PTRACE_SYSCALL when syscall catchpoints are active).
   SIGNAL (with optional siginfo INFO) is delivered to the inferior,
   or queued for later if it can't be delivered right now.  On ptrace
   failure, calls perror_with_name unless the LWP has simply
   disappeared (callers go through resume_one_lwp to filter that
   case).  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* A pending status must be reported before the LWP can be resumed
     again.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we're resuming from, for later comparison.  Guarded
     by proc->tdesc as registers may not be readable yet (see note at
     the top of this function).  */
  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      /* Walk to the oldest entry (the tail of the list), so signals
	 are delivered in the order they were queued.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  /* Choose the ptrace resume request: single-step takes precedence;
     otherwise stop at syscall boundaries if GDB is catching
     syscalls.  */
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4324
/* Default implementation of the low_prepare_to_resume hook: nothing
   to do.  Presumably architecture-specific targets override this to
   flush per-arch state before the LWP is resumed — confirm against
   the subclasses.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4330
23f238d3
PA
4331/* Called when we try to resume a stopped LWP and that errors out. If
4332 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4333 or about to become), discard the error, clear any pending status
4334 the LWP may have, and return true (we'll collect the exit status
4335 soon enough). Otherwise, return false. */
4336
4337static int
4338check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4339{
4340 struct thread_info *thread = get_lwp_thread (lp);
4341
4342 /* If we get an error after resuming the LWP successfully, we'd
4343 confuse !T state for the LWP being gone. */
4344 gdb_assert (lp->stopped);
4345
4346 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4347 because even if ptrace failed with ESRCH, the tracee may be "not
4348 yet fully dead", but already refusing ptrace requests. In that
4349 case the tracee has 'R (Running)' state for a little bit
4350 (observed in Linux 3.18). See also the note on ESRCH in the
4351 ptrace(2) man page. Instead, check whether the LWP has any state
4352 other than ptrace-stopped. */
4353
4354 /* Don't assume anything if /proc/PID/status can't be read. */
4355 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4356 {
23f238d3
PA
4357 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4358 lp->status_pending_p = 0;
4359 return 1;
4360 }
4361 return 0;
4362}
4363
/* Like resume_one_lwp_throw, but swallows the error when the LWP has
   simply vanished from under us (zombie or nearly so); the exit
   status will be collected soon enough.  Any other error is
   re-thrown.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Only ignore the failure if the LWP is gone.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }
}
4378
5fdda392
SM
4379/* This function is called once per thread via for_each_thread.
4380 We look up which resume request applies to THREAD and mark it with a
4381 pointer to the appropriate resume request.
5544ad89
DJ
4382
4383 This algorithm is O(threads * resume elements), but resume elements
4384 is small (and will remain small at least until GDB supports thread
4385 suspension). */
ebcf782c 4386
5fdda392
SM
4387static void
4388linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4389{
d86d4aaf 4390 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4391
5fdda392 4392 for (int ndx = 0; ndx < n; ndx++)
95954743 4393 {
5fdda392 4394 ptid_t ptid = resume[ndx].thread;
d7e15655 4395 if (ptid == minus_one_ptid
9c80ecd6 4396 || ptid == thread->id
0c9070b3
YQ
4397 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4398 of PID'. */
e99b03dc 4399 || (ptid.pid () == pid_of (thread)
0e998d96 4400 && (ptid.is_pid ()
e38504b3 4401 || ptid.lwp () == -1)))
95954743 4402 {
5fdda392 4403 if (resume[ndx].kind == resume_stop
8336d594 4404 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4405 {
4406 if (debug_threads)
87ce2a04
DE
4407 debug_printf ("already %s LWP %ld at GDB's request\n",
4408 (thread->last_status.kind
4409 == TARGET_WAITKIND_STOPPED)
4410 ? "stopped"
4411 : "stopping",
d86d4aaf 4412 lwpid_of (thread));
d50171e4
PA
4413
4414 continue;
4415 }
4416
5a04c4cf
PA
4417 /* Ignore (wildcard) resume requests for already-resumed
4418 threads. */
5fdda392 4419 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4420 && thread->last_resume_kind != resume_stop)
4421 {
4422 if (debug_threads)
4423 debug_printf ("already %s LWP %ld at GDB's request\n",
4424 (thread->last_resume_kind
4425 == resume_step)
4426 ? "stepping"
4427 : "continuing",
4428 lwpid_of (thread));
4429 continue;
4430 }
4431
4432 /* Don't let wildcard resumes resume fork children that GDB
4433 does not yet know are new fork children. */
4434 if (lwp->fork_relative != NULL)
4435 {
5a04c4cf
PA
4436 struct lwp_info *rel = lwp->fork_relative;
4437
4438 if (rel->status_pending_p
4439 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4440 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4441 {
4442 if (debug_threads)
4443 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4444 lwpid_of (thread));
4445 continue;
4446 }
4447 }
4448
4449 /* If the thread has a pending event that has already been
4450 reported to GDBserver core, but GDB has not pulled the
4451 event out of the vStopped queue yet, likewise, ignore the
4452 (wildcard) resume request. */
9c80ecd6 4453 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4454 {
4455 if (debug_threads)
4456 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4457 lwpid_of (thread));
4458 continue;
4459 }
4460
5fdda392 4461 lwp->resume = &resume[ndx];
8336d594 4462 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4463
c2d6af84
PA
4464 lwp->step_range_start = lwp->resume->step_range_start;
4465 lwp->step_range_end = lwp->resume->step_range_end;
4466
fa593d66
PA
4467 /* If we had a deferred signal to report, dequeue one now.
4468 This can happen if LWP gets more than one signal while
4469 trying to get out of a jump pad. */
4470 if (lwp->stopped
4471 && !lwp->status_pending_p
4472 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4473 {
4474 lwp->status_pending_p = 1;
4475
4476 if (debug_threads)
87ce2a04
DE
4477 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4478 "leaving status pending.\n",
d86d4aaf
DE
4479 WSTOPSIG (lwp->status_pending),
4480 lwpid_of (thread));
fa593d66
PA
4481 }
4482
5fdda392 4483 return;
95954743
PA
4484 }
4485 }
2bd7c093
PA
4486
4487 /* No resume action for this thread. */
4488 lwp->resume = NULL;
5544ad89
DJ
4489}
4490
df95181f
TBA
4491bool
4492linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4493{
d86d4aaf 4494 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4495
bd99dc85
PA
4496 /* LWPs which will not be resumed are not interesting, because
4497 we might not wait for them next time through linux_wait. */
2bd7c093 4498 if (lwp->resume == NULL)
25c28b4d 4499 return false;
64386c31 4500
df95181f 4501 return thread_still_has_status_pending (thread);
d50171e4
PA
4502}
4503
/* Return true if THREAD is stopped on a breakpoint (or fast
   tracepoint jump) that must be stepped over before it can be
   resumed.  Returns false whenever resuming normally (or letting the
   breakpoint re-trigger) is the right thing to do.  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  /* A pending status must be reported first; a step-over can wait.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* Switch threads temporarily: the breakpoint queries below operate
     on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4634
/* Begin a step-over operation for LWP: stop (and suspend) all other
   LWPs, remove the breakpoint/fast-tracepoint jump at LWP's PC,
   single-step LWP past it, and record in STEP_OVER_BKPT that we're
   waiting for this LWP's next event.  finish_step_over undoes the
   removal once the step completes.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint manipulation below operates on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4681
/* Finish a pending step-over started by start_step_over: reinsert the
   breakpoint (and fast tracepoint jumps) that were removed at
   LWP->bp_reinsert, drop any software single-step breakpoints, and
   clear step_over_bkpt.  Returns true if a step-over was in fact in
   progress for LWP, false otherwise.  */

bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Breakpoint routines operate on current_thread.  */
      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return true;
    }
  else
    return false;
}
4719
/* If a step-over is in progress (step_over_bkpt is set), wait for all
   pending events and finish the step-over before proceeding.  Used
   e.g. on detach, so we never leave a breakpoint uninserted.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      /* The stepping LWP may have exited in the meantime, in which
	 case find_lwp_pid returns NULL.  */
      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4746
/* Act on the resume request previously recorded in THREAD's LWP (see
   linux_set_resume_request, called from linux_process_target::resume).
   Handles stop requests by queueing a SIGSTOP, queues any signal the
   request carries, and either resumes the LWP or leaves it stopped
   when LEAVE_ALL_STOPPED is set or the LWP has a pending status or is
   suspended.  Clears the per-LWP resume pointer when done.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}
4851
/* The "resume" target op.  Records RESUME_INFO (N entries) on each
   thread, decides whether everything must be left stopped (pending
   status in all-stop, or a step-over needed), lets every thread act
   on its request via resume_one_thread, and kicks off a step-over if
   one is required.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Match each thread against RESUME_INFO and stash the chosen
     request on its LWP.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4927
/* Resume THREAD's LWP, unless it is EXCEPT, already running, wanted
   stopped by the client, has a pending status, or is suspended.
   Requeues a SIGSTOP when the client wants the LWP stopped but no
   stop has been reported yet, and single-steps when the client asked
   for a step or a breakpoint reinsert is pending.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf (" LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  /* The client explicitly asked this thread to stay stopped, and we
     already reported the stop.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf (" client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf (" LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
5024
df95181f
TBA
5025void
5026linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5027 lwp_info *except)
7984d532 5028{
d86d4aaf 5029 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5030
5031 if (lwp == except)
e2b44075 5032 return;
7984d532 5033
863d01bd 5034 lwp_suspended_decr (lwp);
7984d532 5035
e2b44075 5036 proceed_one_lwp (thread, except);
d50171e4
PA
5037}
5038
d16f3f6c
TBA
5039void
5040linux_process_target::proceed_all_lwps ()
d50171e4 5041{
d86d4aaf 5042 struct thread_info *need_step_over;
d50171e4
PA
5043
5044 /* If there is a thread which would otherwise be resumed, which is
5045 stopped at a breakpoint that needs stepping over, then don't
5046 resume any threads - have it step over the breakpoint with all
5047 other threads stopped, then resume all threads again. */
5048
bf9ae9d8 5049 if (low_supports_breakpoints ())
d50171e4 5050 {
df95181f
TBA
5051 need_step_over = find_thread ([this] (thread_info *thread)
5052 {
5053 return thread_needs_step_over (thread);
5054 });
d50171e4
PA
5055
5056 if (need_step_over != NULL)
5057 {
5058 if (debug_threads)
87ce2a04
DE
5059 debug_printf ("proceed_all_lwps: found "
5060 "thread %ld needing a step-over\n",
5061 lwpid_of (need_step_over));
d50171e4 5062
d86d4aaf 5063 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
5064 return;
5065 }
5066 }
5544ad89 5067
d50171e4 5068 if (debug_threads)
87ce2a04 5069 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 5070
df95181f 5071 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
5072 {
5073 proceed_one_lwp (thread, NULL);
5074 });
d50171e4
PA
5075}
5076
d16f3f6c
TBA
5077void
5078linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 5079{
5544ad89
DJ
5080 if (debug_threads)
5081 {
87ce2a04 5082 debug_enter ();
d50171e4 5083 if (except)
87ce2a04 5084 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5085 lwpid_of (get_lwp_thread (except)));
5544ad89 5086 else
87ce2a04 5087 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5088 }
5089
7984d532 5090 if (unsuspend)
e2b44075
SM
5091 for_each_thread ([&] (thread_info *thread)
5092 {
5093 unsuspend_and_proceed_one_lwp (thread, except);
5094 });
7984d532 5095 else
e2b44075
SM
5096 for_each_thread ([&] (thread_info *thread)
5097 {
5098 proceed_one_lwp (thread, except);
5099 });
87ce2a04
DE
5100
5101 if (debug_threads)
5102 {
5103 debug_printf ("unstop_all_lwps done\n");
5104 debug_exit ();
5105 }
0d62e5e8
DJ
5106}
5107
58caa3dc
DJ
5108
5109#ifdef HAVE_LINUX_REGSETS
5110
1faeff08
MR
5111#define use_linux_regsets 1
5112
030031ee
PA
5113/* Returns true if REGSET has been disabled. */
5114
5115static int
5116regset_disabled (struct regsets_info *info, struct regset_info *regset)
5117{
5118 return (info->disabled_regsets != NULL
5119 && info->disabled_regsets[regset - info->regsets]);
5120}
5121
5122/* Disable REGSET. */
5123
5124static void
5125disable_regset (struct regsets_info *info, struct regset_info *regset)
5126{
5127 int dr_offset;
5128
5129 dr_offset = regset - info->regsets;
5130 if (info->disabled_regsets == NULL)
224c3ddb 5131 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5132 info->disabled_regsets[dr_offset] = 1;
5133}
5134
58caa3dc 5135static int
3aee8918
PA
5136regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5137 struct regcache *regcache)
58caa3dc
DJ
5138{
5139 struct regset_info *regset;
e9d25b98 5140 int saw_general_regs = 0;
95954743 5141 int pid;
1570b33e 5142 struct iovec iov;
58caa3dc 5143
0bfdf32f 5144 pid = lwpid_of (current_thread);
28eef672 5145 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5146 {
1570b33e
L
5147 void *buf, *data;
5148 int nt_type, res;
58caa3dc 5149
030031ee 5150 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5151 continue;
58caa3dc 5152
bca929d3 5153 buf = xmalloc (regset->size);
1570b33e
L
5154
5155 nt_type = regset->nt_type;
5156 if (nt_type)
5157 {
5158 iov.iov_base = buf;
5159 iov.iov_len = regset->size;
5160 data = (void *) &iov;
5161 }
5162 else
5163 data = buf;
5164
dfb64f85 5165#ifndef __sparc__
f15f9948 5166 res = ptrace (regset->get_request, pid,
b8e1b30e 5167 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5168#else
1570b33e 5169 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5170#endif
58caa3dc
DJ
5171 if (res < 0)
5172 {
1ef53e6b
AH
5173 if (errno == EIO
5174 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5175 {
1ef53e6b
AH
5176 /* If we get EIO on a regset, or an EINVAL and the regset is
5177 optional, do not try it again for this process mode. */
030031ee 5178 disable_regset (regsets_info, regset);
58caa3dc 5179 }
e5a9158d
AA
5180 else if (errno == ENODATA)
5181 {
5182 /* ENODATA may be returned if the regset is currently
5183 not "active". This can happen in normal operation,
5184 so suppress the warning in this case. */
5185 }
fcd4a73d
YQ
5186 else if (errno == ESRCH)
5187 {
5188 /* At this point, ESRCH should mean the process is
5189 already gone, in which case we simply ignore attempts
5190 to read its registers. */
5191 }
58caa3dc
DJ
5192 else
5193 {
0d62e5e8 5194 char s[256];
95954743
PA
5195 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5196 pid);
0d62e5e8 5197 perror (s);
58caa3dc
DJ
5198 }
5199 }
098dbe61
AA
5200 else
5201 {
5202 if (regset->type == GENERAL_REGS)
5203 saw_general_regs = 1;
5204 regset->store_function (regcache, buf);
5205 }
fdeb2a12 5206 free (buf);
58caa3dc 5207 }
e9d25b98
DJ
5208 if (saw_general_regs)
5209 return 0;
5210 else
5211 return 1;
58caa3dc
DJ
5212}
5213
/* Write all regsets in REGSETS_INFO from REGCACHE back to the current
   thread's LWP via ptrace.  For each regset we first read the
   kernel's current contents, overlay the cached registers, and only
   then write it back, preserving any kernel-side fields gdbserver
   does not track.  Returns 0 if a general-register set was written,
   1 otherwise (caller then falls back to PTRACE_POKEUSER).  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty, disabled, or read-only (no fill function) regsets.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5302
1faeff08 5303#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5304
1faeff08 5305#define use_linux_regsets 0
3aee8918
PA
5306#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5307#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5308
58caa3dc 5309#endif
1faeff08
MR
5310
5311/* Return 1 if register REGNO is supported by one of the regset ptrace
5312 calls or 0 if it has to be transferred individually. */
5313
5314static int
3aee8918 5315linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5316{
5317 unsigned char mask = 1 << (regno % 8);
5318 size_t index = regno / 8;
5319
5320 return (use_linux_regsets
3aee8918
PA
5321 && (regs_info->regset_bitmap == NULL
5322 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5323}
5324
58caa3dc 5325#ifdef HAVE_LINUX_USRREGS
1faeff08 5326
5b3da067 5327static int
3aee8918 5328register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5329{
5330 int addr;
5331
3aee8918 5332 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5333 error ("Invalid register number %d.", regnum);
5334
3aee8918 5335 addr = usrregs->regmap[regnum];
1faeff08
MR
5336
5337 return addr;
5338}
5339
daca57a7
TBA
5340
/* Fetch one register REGNO of the current thread's LWP from the USER
   area with PTRACE_PEEKUSER, one word at a time, and supply it to
   REGCACHE.  Silently returns for registers outside USRREGS, ones the
   low target cannot fetch, or ones with no USER-area address; on a
   ptrace error the register is supplied as unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
5384
/* Store one register REGNO from REGCACHE into the current thread's
   LWP USER area with PTRACE_POKEUSER, one word at a time.  Silently
   returns for registers outside USRREGS, ones the low target cannot
   store, or ones with no USER-area address; ESRCH (process gone) is
   ignored, other ptrace errors raise an error.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words;
     zero-fill so padding bytes are deterministic.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5436#endif /* HAVE_LINUX_USRREGS */
1faeff08 5437
/* Default implementation: copy register REGNO from REGCACHE into BUF
   unchanged.  Low targets override this when the ptrace USER-area
   layout differs from the regcache layout.  */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}
5444
/* Default implementation: supply register REGNO to REGCACHE from BUF
   unchanged.  Low targets override this when the ptrace USER-area
   layout differs from the regcache layout.  */

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}
5451
daca57a7
TBA
5452void
5453linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5454 regcache *regcache,
5455 int regno, int all)
1faeff08 5456{
daca57a7 5457#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5458 struct usrregs_info *usr = regs_info->usrregs;
5459
1faeff08
MR
5460 if (regno == -1)
5461 {
3aee8918
PA
5462 for (regno = 0; regno < usr->num_regs; regno++)
5463 if (all || !linux_register_in_regsets (regs_info, regno))
5464 fetch_register (usr, regcache, regno);
1faeff08
MR
5465 }
5466 else
3aee8918 5467 fetch_register (usr, regcache, regno);
daca57a7 5468#endif
1faeff08
MR
5469}
5470
daca57a7
TBA
5471void
5472linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5473 regcache *regcache,
5474 int regno, int all)
1faeff08 5475{
daca57a7 5476#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5477 struct usrregs_info *usr = regs_info->usrregs;
5478
1faeff08
MR
5479 if (regno == -1)
5480 {
3aee8918
PA
5481 for (regno = 0; regno < usr->num_regs; regno++)
5482 if (all || !linux_register_in_regsets (regs_info, regno))
5483 store_register (usr, regcache, regno);
1faeff08
MR
5484 }
5485 else
3aee8918 5486 store_register (usr, regcache, regno);
58caa3dc 5487#endif
daca57a7 5488}
1faeff08 5489
/* The "fetch_registers" target op.  Fetch REGNO (or all registers
   when REGNO == -1) into REGCACHE, first giving the low target a
   chance via low_fetch_register, then trying the regset interface,
   and finally falling back to the USER area for anything the regsets
   did not cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Let the low target supply any special registers first.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* Nonzero ALL means the regsets did not cover everything; fetch
	 the remainder (or everything) through the USER area.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5520
a5a4d4cd
TBA
5521void
5522linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5523{
1faeff08
MR
5524 int use_regsets;
5525 int all = 0;
aa8d21c9 5526 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5527
5528 if (regno == -1)
5529 {
3aee8918
PA
5530 all = regsets_store_inferior_registers (regs_info->regsets_info,
5531 regcache);
5532 if (regs_info->usrregs != NULL)
5533 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5534 }
5535 else
5536 {
3aee8918 5537 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5538 if (use_regsets)
3aee8918
PA
5539 all = regsets_store_inferior_registers (regs_info->regsets_info,
5540 regcache);
5541 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5542 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5543 }
58caa3dc
DJ
5544}
5545
/* Default implementation of the low target hook: no register is
   specially handled, so report "not fetched" and let the generic
   regset/USER-area paths handle REGNO.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5551
e2558df3 5552/* A wrapper for the read_memory target op. */
da6d8c04 5553
c3e735a6 5554static int
f450004a 5555linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5556{
52405d85 5557 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5558}
5559
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno from the failing ptrace otherwise.  Tries the fast
   /proc/PID/mem path first for multi-word reads, then falls back to
   word-by-word PTRACE_PEEKTEXT.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  I counts words read,
     then is converted to the byte count actually usable for MYADDR,
     accounting for the low-order alignment padding at the start.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5648
93ae6fdc
PA
5649/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5650 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5651 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5652
e2558df3
TBA
5653int
5654linux_process_target::write_memory (CORE_ADDR memaddr,
5655 const unsigned char *myaddr, int len)
da6d8c04 5656{
ae3e2ccf 5657 int i;
da6d8c04 5658 /* Round starting address down to longword boundary. */
ae3e2ccf 5659 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
da6d8c04 5660 /* Round ending address up; get number of longwords that makes. */
ae3e2ccf 5661 int count
493e2a69
MS
5662 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5663 / sizeof (PTRACE_XFER_TYPE);
5664
da6d8c04 5665 /* Allocate buffer of that many longwords. */
ae3e2ccf 5666 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
493e2a69 5667
0bfdf32f 5668 int pid = lwpid_of (current_thread);
da6d8c04 5669
f0ae6fc3
PA
5670 if (len == 0)
5671 {
5672 /* Zero length write always succeeds. */
5673 return 0;
5674 }
5675
0d62e5e8
DJ
5676 if (debug_threads)
5677 {
58d6951d 5678 /* Dump up to four bytes. */
bf47e248
PA
5679 char str[4 * 2 + 1];
5680 char *p = str;
5681 int dump = len < 4 ? len : 4;
5682
5683 for (i = 0; i < dump; i++)
5684 {
5685 sprintf (p, "%02x", myaddr[i]);
5686 p += 2;
5687 }
5688 *p = '\0';
5689
5690 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5691 str, (long) memaddr, pid);
0d62e5e8
DJ
5692 }
5693
da6d8c04
DJ
5694 /* Fill start and end extra bytes of buffer with existing memory data. */
5695
93ae6fdc 5696 errno = 0;
14ce3065
DE
5697 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5698 about coercing an 8 byte integer to a 4 byte pointer. */
5699 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5700 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5701 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5702 if (errno)
5703 return errno;
da6d8c04
DJ
5704
5705 if (count > 1)
5706 {
93ae6fdc 5707 errno = 0;
da6d8c04 5708 buffer[count - 1]
95954743 5709 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5710 /* Coerce to a uintptr_t first to avoid potential gcc warning
5711 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5712 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5713 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5714 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5715 if (errno)
5716 return errno;
da6d8c04
DJ
5717 }
5718
93ae6fdc 5719 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5720
493e2a69
MS
5721 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5722 myaddr, len);
da6d8c04
DJ
5723
5724 /* Write the entire buffer. */
5725
5726 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5727 {
5728 errno = 0;
14ce3065
DE
5729 ptrace (PTRACE_POKETEXT, pid,
5730 /* Coerce to a uintptr_t first to avoid potential gcc warning
5731 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5732 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5733 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5734 if (errno)
5735 return errno;
5736 }
5737
5738 return 0;
5739}
2f2893d9 5740
2a31c7aa
TBA
5741void
5742linux_process_target::look_up_symbols ()
2f2893d9 5743{
0d62e5e8 5744#ifdef USE_THREAD_DB
95954743
PA
5745 struct process_info *proc = current_process ();
5746
fe978cb0 5747 if (proc->priv->thread_db != NULL)
0d62e5e8
DJ
5748 return;
5749
9b4c5f87 5750 thread_db_init ();
0d62e5e8
DJ
5751#endif
5752}
5753
eb497a2a
TBA
5754void
5755linux_process_target::request_interrupt ()
e5379b03 5756{
78708b7c
PA
5757 /* Send a SIGINT to the process group. This acts just like the user
5758 typed a ^C on the controlling terminal. */
eb497a2a 5759 ::kill (-signal_pid, SIGINT);
e5379b03
DJ
5760}
5761
eac215cc
TBA
5762bool
5763linux_process_target::supports_read_auxv ()
5764{
5765 return true;
5766}
5767
aa691b87
RM
5768/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5769 to debugger memory starting at MYADDR. */
5770
eac215cc
TBA
5771int
5772linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5773 unsigned int len)
aa691b87
RM
5774{
5775 char filename[PATH_MAX];
5776 int fd, n;
0bfdf32f 5777 int pid = lwpid_of (current_thread);
aa691b87 5778
6cebaf6e 5779 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5780
5781 fd = open (filename, O_RDONLY);
5782 if (fd < 0)
5783 return -1;
5784
5785 if (offset != (CORE_ADDR) 0
5786 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5787 n = -1;
5788 else
5789 n = read (fd, myaddr, len);
5790
5791 close (fd);
5792
5793 return n;
5794}
5795
7e0bde70
TBA
5796int
5797linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5798 int size, raw_breakpoint *bp)
e013ee27 5799{
c8f4bfdd
YQ
5800 if (type == raw_bkpt_type_sw)
5801 return insert_memory_breakpoint (bp);
e013ee27 5802 else
9db9aa23
TBA
5803 return low_insert_point (type, addr, size, bp);
5804}
5805
5806int
5807linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5808 int size, raw_breakpoint *bp)
5809{
5810 /* Unsupported (see target.h). */
5811 return 1;
e013ee27
OF
5812}
5813
7e0bde70
TBA
5814int
5815linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5816 int size, raw_breakpoint *bp)
e013ee27 5817{
c8f4bfdd
YQ
5818 if (type == raw_bkpt_type_sw)
5819 return remove_memory_breakpoint (bp);
e013ee27 5820 else
9db9aa23
TBA
5821 return low_remove_point (type, addr, size, bp);
5822}
5823
5824int
5825linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5826 int size, raw_breakpoint *bp)
5827{
5828 /* Unsupported (see target.h). */
5829 return 1;
e013ee27
OF
5830}
5831
84320c4e 5832/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5833 method. */
5834
84320c4e
TBA
5835bool
5836linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5837{
5838 struct lwp_info *lwp = get_thread_lwp (current_thread);
5839
5840 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5841}
5842
84320c4e 5843/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5844 method. */
5845
84320c4e
TBA
5846bool
5847linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5848{
5849 return USE_SIGTRAP_SIGINFO;
5850}
5851
93fe88b2 5852/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5853 method. */
5854
93fe88b2
TBA
5855bool
5856linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5857{
5858 struct lwp_info *lwp = get_thread_lwp (current_thread);
5859
5860 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5861}
5862
93fe88b2 5863/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5864 method. */
5865
93fe88b2
TBA
5866bool
5867linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5868{
5869 return USE_SIGTRAP_SIGINFO;
5870}
5871
70b90b91 5872/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5873
22aa6223
TBA
5874bool
5875linux_process_target::supports_hardware_single_step ()
45614f15 5876{
b31cdfa6 5877 return true;
45614f15
YQ
5878}
5879
6eeb5c55
TBA
5880bool
5881linux_process_target::stopped_by_watchpoint ()
e013ee27 5882{
0bfdf32f 5883 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5884
15c66dd6 5885 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5886}
5887
6eeb5c55
TBA
5888CORE_ADDR
5889linux_process_target::stopped_data_address ()
e013ee27 5890{
0bfdf32f 5891 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5892
5893 return lwp->stopped_data_address;
e013ee27
OF
5894}
5895
db0dfaa0
LM
5896/* This is only used for targets that define PT_TEXT_ADDR,
5897 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5898 the target has different ways of acquiring this information, like
5899 loadmaps. */
52fb6437 5900
5203ae1e
TBA
5901bool
5902linux_process_target::supports_read_offsets ()
5903{
5904#ifdef SUPPORTS_READ_OFFSETS
5905 return true;
5906#else
5907 return false;
5908#endif
5909}
5910
52fb6437
NS
5911/* Under uClinux, programs are loaded at non-zero offsets, which we need
5912 to tell gdb about. */
5913
5203ae1e
TBA
5914int
5915linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
52fb6437 5916{
5203ae1e 5917#ifdef SUPPORTS_READ_OFFSETS
52fb6437 5918 unsigned long text, text_end, data;
62828379 5919 int pid = lwpid_of (current_thread);
52fb6437
NS
5920
5921 errno = 0;
5922
b8e1b30e
LM
5923 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5924 (PTRACE_TYPE_ARG4) 0);
5925 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5926 (PTRACE_TYPE_ARG4) 0);
5927 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5928 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
5929
5930 if (errno == 0)
5931 {
5932 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
5933 used by gdb) are relative to the beginning of the program,
5934 with the data segment immediately following the text segment.
5935 However, the actual runtime layout in memory may put the data
5936 somewhere else, so when we send gdb a data base-address, we
5937 use the real data base address and subtract the compile-time
5938 data base-address from it (which is just the length of the
5939 text segment). BSS immediately follows data in both
5940 cases. */
52fb6437
NS
5941 *text_p = text;
5942 *data_p = data - (text_end - text);
1b3f6016 5943
52fb6437
NS
5944 return 1;
5945 }
5203ae1e
TBA
5946 return 0;
5947#else
5948 gdb_assert_not_reached ("target op read_offsets not supported");
52fb6437 5949#endif
5203ae1e 5950}
52fb6437 5951
6e3fd7e9
TBA
5952bool
5953linux_process_target::supports_get_tls_address ()
5954{
5955#ifdef USE_THREAD_DB
5956 return true;
5957#else
5958 return false;
5959#endif
5960}
5961
5962int
5963linux_process_target::get_tls_address (thread_info *thread,
5964 CORE_ADDR offset,
5965 CORE_ADDR load_module,
5966 CORE_ADDR *address)
5967{
5968#ifdef USE_THREAD_DB
5969 return thread_db_get_tls_address (thread, offset, load_module, address);
5970#else
5971 return -1;
5972#endif
5973}
5974
2d0795ee
TBA
5975bool
5976linux_process_target::supports_qxfer_osdata ()
5977{
5978 return true;
5979}
5980
5981int
5982linux_process_target::qxfer_osdata (const char *annex,
5983 unsigned char *readbuf,
5984 unsigned const char *writebuf,
5985 CORE_ADDR offset, int len)
07e059b5 5986{
d26e3629 5987 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5988}
5989
cb63de7c
TBA
5990void
5991linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5992 gdb_byte *inf_siginfo, int direction)
d0722149 5993{
cb63de7c 5994 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
5995
5996 /* If there was no callback, or the callback didn't do anything,
5997 then just do a straight memcpy. */
5998 if (!done)
5999 {
6000 if (direction == 1)
a5362b9a 6001 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6002 else
a5362b9a 6003 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6004 }
6005}
6006
cb63de7c
TBA
6007bool
6008linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
6009 int direction)
6010{
6011 return false;
6012}
6013
d7abedf7
TBA
6014bool
6015linux_process_target::supports_qxfer_siginfo ()
6016{
6017 return true;
6018}
6019
6020int
6021linux_process_target::qxfer_siginfo (const char *annex,
6022 unsigned char *readbuf,
6023 unsigned const char *writebuf,
6024 CORE_ADDR offset, int len)
4aa995e1 6025{
d0722149 6026 int pid;
a5362b9a 6027 siginfo_t siginfo;
8adce034 6028 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1 6029
0bfdf32f 6030 if (current_thread == NULL)
4aa995e1
PA
6031 return -1;
6032
0bfdf32f 6033 pid = lwpid_of (current_thread);
4aa995e1
PA
6034
6035 if (debug_threads)
87ce2a04
DE
6036 debug_printf ("%s siginfo for lwp %d.\n",
6037 readbuf != NULL ? "Reading" : "Writing",
6038 pid);
4aa995e1 6039
0adea5f7 6040 if (offset >= sizeof (siginfo))
4aa995e1
PA
6041 return -1;
6042
b8e1b30e 6043 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
6044 return -1;
6045
d0722149
DE
6046 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6047 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6048 inferior with a 64-bit GDBSERVER should look the same as debugging it
6049 with a 32-bit GDBSERVER, we need to convert it. */
6050 siginfo_fixup (&siginfo, inf_siginfo, 0);
6051
4aa995e1
PA
6052 if (offset + len > sizeof (siginfo))
6053 len = sizeof (siginfo) - offset;
6054
6055 if (readbuf != NULL)
d0722149 6056 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
6057 else
6058 {
d0722149
DE
6059 memcpy (inf_siginfo + offset, writebuf, len);
6060
6061 /* Convert back to ptrace layout before flushing it out. */
6062 siginfo_fixup (&siginfo, inf_siginfo, 1);
6063
b8e1b30e 6064 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4aa995e1
PA
6065 return -1;
6066 }
6067
6068 return len;
6069}
6070
bd99dc85
PA
6071/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6072 so we notice when children change state; as the handler for the
6073 sigsuspend in my_waitpid. */
6074
6075static void
6076sigchld_handler (int signo)
6077{
6078 int old_errno = errno;
6079
6080 if (debug_threads)
e581f2b4
PA
6081 {
6082 do
6083 {
a7e559cc
AH
6084 /* Use the async signal safe debug function. */
6085 if (debug_write ("sigchld_handler\n",
6086 sizeof ("sigchld_handler\n") - 1) < 0)
e581f2b4
PA
6087 break; /* just ignore */
6088 } while (0);
6089 }
bd99dc85
PA
6090
6091 if (target_is_async_p ())
6092 async_file_mark (); /* trigger a linux_wait */
6093
6094 errno = old_errno;
6095}
6096
0dc587d4
TBA
6097bool
6098linux_process_target::supports_non_stop ()
bd99dc85 6099{
0dc587d4 6100 return true;
bd99dc85
PA
6101}
6102
0dc587d4
TBA
6103bool
6104linux_process_target::async (bool enable)
bd99dc85 6105{
0dc587d4 6106 bool previous = target_is_async_p ();
bd99dc85 6107
8336d594 6108 if (debug_threads)
87ce2a04
DE
6109 debug_printf ("linux_async (%d), previous=%d\n",
6110 enable, previous);
8336d594 6111
bd99dc85
PA
6112 if (previous != enable)
6113 {
6114 sigset_t mask;
6115 sigemptyset (&mask);
6116 sigaddset (&mask, SIGCHLD);
6117
21987b9c 6118 gdb_sigmask (SIG_BLOCK, &mask, NULL);
bd99dc85
PA
6119
6120 if (enable)
6121 {
6122 if (pipe (linux_event_pipe) == -1)
aa96c426
GB
6123 {
6124 linux_event_pipe[0] = -1;
6125 linux_event_pipe[1] = -1;
21987b9c 6126 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
aa96c426
GB
6127
6128 warning ("creating event pipe failed.");
6129 return previous;
6130 }
bd99dc85
PA
6131
6132 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6133 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6134
6135 /* Register the event loop handler. */
6136 add_file_handler (linux_event_pipe[0],
6137 handle_target_event, NULL);
6138
6139 /* Always trigger a linux_wait. */
6140 async_file_mark ();
6141 }
6142 else
6143 {
6144 delete_file_handler (linux_event_pipe[0]);
6145
6146 close (linux_event_pipe[0]);
6147 close (linux_event_pipe[1]);
6148 linux_event_pipe[0] = -1;
6149 linux_event_pipe[1] = -1;
6150 }
6151
21987b9c 6152 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
bd99dc85
PA
6153 }
6154
6155 return previous;
6156}
6157
0dc587d4
TBA
6158int
6159linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
6160{
6161 /* Register or unregister from event-loop accordingly. */
0dc587d4 6162 target_async (nonstop);
aa96c426 6163
0dc587d4 6164 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
6165 return -1;
6166
bd99dc85
PA
6167 return 0;
6168}
6169
652aef77
TBA
6170bool
6171linux_process_target::supports_multi_process ()
cf8fd78b 6172{
652aef77 6173 return true;
cf8fd78b
PA
6174}
6175
89245bc0
DB
6176/* Check if fork events are supported. */
6177
9690a72a
TBA
6178bool
6179linux_process_target::supports_fork_events ()
89245bc0
DB
6180{
6181 return linux_supports_tracefork ();
6182}
6183
6184/* Check if vfork events are supported. */
6185
9690a72a
TBA
6186bool
6187linux_process_target::supports_vfork_events ()
89245bc0
DB
6188{
6189 return linux_supports_tracefork ();
6190}
6191
94585166
DB
6192/* Check if exec events are supported. */
6193
9690a72a
TBA
6194bool
6195linux_process_target::supports_exec_events ()
94585166
DB
6196{
6197 return linux_supports_traceexec ();
6198}
6199
de0d863e
DB
6200/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6201 ptrace flags for all inferiors. This is in case the new GDB connection
6202 doesn't support the same set of events that the previous one did. */
6203
fb00dfce
TBA
6204void
6205linux_process_target::handle_new_gdb_connection ()
de0d863e 6206{
de0d863e 6207 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
6208 for_each_thread ([] (thread_info *thread)
6209 {
6210 struct lwp_info *lwp = get_thread_lwp (thread);
6211
6212 if (!lwp->stopped)
6213 {
6214 /* Stop the lwp so we can modify its ptrace options. */
6215 lwp->must_set_ptrace_flags = 1;
6216 linux_stop_lwp (lwp);
6217 }
6218 else
6219 {
6220 /* Already stopped; go ahead and set the ptrace options. */
6221 struct process_info *proc = find_process_pid (pid_of (thread));
6222 int options = linux_low_ptrace_options (proc->attached);
6223
6224 linux_enable_event_reporting (lwpid_of (thread), options);
6225 lwp->must_set_ptrace_flags = 0;
6226 }
6227 });
de0d863e
DB
6228}
6229
55cf3021
TBA
6230int
6231linux_process_target::handle_monitor_command (char *mon)
6232{
6233#ifdef USE_THREAD_DB
6234 return thread_db_handle_monitor_command (mon);
6235#else
6236 return 0;
6237#endif
6238}
6239
95a45fc1
TBA
6240int
6241linux_process_target::core_of_thread (ptid_t ptid)
6242{
6243 return linux_common_core_of_thread (ptid);
6244}
6245
c756403b
TBA
6246bool
6247linux_process_target::supports_disable_randomization ()
03583c20
UW
6248{
6249#ifdef HAVE_PERSONALITY
c756403b 6250 return true;
03583c20 6251#else
c756403b 6252 return false;
03583c20
UW
6253#endif
6254}
efcbbd14 6255
c0245cb9
TBA
6256bool
6257linux_process_target::supports_agent ()
d1feda86 6258{
c0245cb9 6259 return true;
d1feda86
YQ
6260}
6261
2526e0cd
TBA
6262bool
6263linux_process_target::supports_range_stepping ()
c2d6af84 6264{
7582c77c 6265 if (supports_software_single_step ())
2526e0cd 6266 return true;
c2d6af84 6267
9cfd8715
TBA
6268 return low_supports_range_stepping ();
6269}
6270
6271bool
6272linux_process_target::low_supports_range_stepping ()
6273{
6274 return false;
c2d6af84
PA
6275}
6276
8247b823
TBA
6277bool
6278linux_process_target::supports_pid_to_exec_file ()
6279{
6280 return true;
6281}
6282
6283char *
6284linux_process_target::pid_to_exec_file (int pid)
6285{
6286 return linux_proc_pid_to_exec_file (pid);
6287}
6288
c9b7b804
TBA
6289bool
6290linux_process_target::supports_multifs ()
6291{
6292 return true;
6293}
6294
6295int
6296linux_process_target::multifs_open (int pid, const char *filename,
6297 int flags, mode_t mode)
6298{
6299 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6300}
6301
6302int
6303linux_process_target::multifs_unlink (int pid, const char *filename)
6304{
6305 return linux_mntns_unlink (pid, filename);
6306}
6307
6308ssize_t
6309linux_process_target::multifs_readlink (int pid, const char *filename,
6310 char *buf, size_t bufsiz)
6311{
6312 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6313}
6314
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP		PT_GETDSBT
# define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP		PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  /* ANNEX selects which loadmap is requested.  */
  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): OFFSET is unsigned, so "offset < 0" can never be
     true; kept for fidelity with the original bounds check.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6396
bc8d3ae4
TBA
6397bool
6398linux_process_target::supports_catch_syscall ()
82075af2 6399{
9eedd27d 6400 return (low_supports_catch_syscall ()
82075af2
JS
6401 && linux_supports_tracesysgood ());
6402}
6403
9eedd27d
TBA
6404bool
6405linux_process_target::low_supports_catch_syscall ()
6406{
6407 return false;
6408}
6409
770d8f6a
TBA
6410CORE_ADDR
6411linux_process_target::read_pc (regcache *regcache)
219f2f23 6412{
bf9ae9d8 6413 if (!low_supports_breakpoints ())
219f2f23
PA
6414 return 0;
6415
bf9ae9d8 6416 return low_get_pc (regcache);
219f2f23
PA
6417}
6418
770d8f6a
TBA
6419void
6420linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23 6421{
bf9ae9d8 6422 gdb_assert (low_supports_breakpoints ());
219f2f23 6423
bf9ae9d8 6424 low_set_pc (regcache, pc);
219f2f23
PA
6425}
6426
68119632
TBA
6427bool
6428linux_process_target::supports_thread_stopped ()
6429{
6430 return true;
6431}
6432
6433bool
6434linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6435{
6436 return get_thread_lwp (thread)->stopped;
6437}
6438
6439/* This exposes stop-all-threads functionality to other modules. */
6440
29e8dc09
TBA
6441void
6442linux_process_target::pause_all (bool freeze)
8336d594 6443{
7984d532
PA
6444 stop_all_lwps (freeze, NULL);
6445}
6446
6447/* This exposes unstop-all-threads functionality to other gdbserver
6448 modules. */
6449
29e8dc09
TBA
6450void
6451linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6452{
6453 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6454}
6455
79b44087
TBA
6456int
6457linux_process_target::prepare_to_access_memory ()
90d74c30
PA
6458{
6459 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6460 running LWP. */
6461 if (non_stop)
29e8dc09 6462 target_pause_all (true);
90d74c30
PA
6463 return 0;
6464}
6465
79b44087
TBA
6466void
6467linux_process_target::done_accessing_memory ()
90d74c30
PA
6468{
6469 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6470 running LWP. */
6471 if (non_stop)
29e8dc09 6472 target_unpause_all (true);
90d74c30
PA
6473}
6474
2268b414
JK
6475/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6476
6477static int
6478get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6479 CORE_ADDR *phdr_memaddr, int *num_phdr)
6480{
6481 char filename[PATH_MAX];
6482 int fd;
6483 const int auxv_size = is_elf64
6484 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6485 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6486
6487 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6488
6489 fd = open (filename, O_RDONLY);
6490 if (fd < 0)
6491 return 1;
6492
6493 *phdr_memaddr = 0;
6494 *num_phdr = 0;
6495 while (read (fd, buf, auxv_size) == auxv_size
6496 && (*phdr_memaddr == 0 || *num_phdr == 0))
6497 {
6498 if (is_elf64)
6499 {
6500 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6501
6502 switch (aux->a_type)
6503 {
6504 case AT_PHDR:
6505 *phdr_memaddr = aux->a_un.a_val;
6506 break;
6507 case AT_PHNUM:
6508 *num_phdr = aux->a_un.a_val;
6509 break;
6510 }
6511 }
6512 else
6513 {
6514 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6515
6516 switch (aux->a_type)
6517 {
6518 case AT_PHDR:
6519 *phdr_memaddr = aux->a_un.a_val;
6520 break;
6521 case AT_PHNUM:
6522 *num_phdr = aux->a_un.a_val;
6523 break;
6524 }
6525 }
6526 }
6527
6528 close (fd);
6529
6530 if (*phdr_memaddr == 0 || *num_phdr == 0)
6531 {
6532 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6533 "phdr_memaddr = %ld, phdr_num = %d",
6534 (long) *phdr_memaddr, *num_phdr);
6535 return 2;
6536 }
6537
6538 return 0;
6539}
6540
6541/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6542
6543static CORE_ADDR
6544get_dynamic (const int pid, const int is_elf64)
6545{
6546 CORE_ADDR phdr_memaddr, relocation;
db1ff28b 6547 int num_phdr, i;
2268b414 6548 unsigned char *phdr_buf;
db1ff28b 6549 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
2268b414
JK
6550
6551 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6552 return 0;
6553
6554 gdb_assert (num_phdr < 100); /* Basic sanity check. */
224c3ddb 6555 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
2268b414
JK
6556
6557 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6558 return 0;
6559
6560 /* Compute relocation: it is expected to be 0 for "regular" executables,
6561 non-zero for PIE ones. */
6562 relocation = -1;
db1ff28b
JK
6563 for (i = 0; relocation == -1 && i < num_phdr; i++)
6564 if (is_elf64)
6565 {
6566 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6567
6568 if (p->p_type == PT_PHDR)
6569 relocation = phdr_memaddr - p->p_vaddr;
6570 }
6571 else
6572 {
6573 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6574
6575 if (p->p_type == PT_PHDR)
6576 relocation = phdr_memaddr - p->p_vaddr;
6577 }
6578
2268b414
JK
6579 if (relocation == -1)
6580 {
e237a7e2
JK
6581 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6582 any real world executables, including PIE executables, have always
6583 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6584 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6585 or present DT_DEBUG anyway (fpc binaries are statically linked).
6586
6587 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6588
6589 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6590
2268b414
JK
6591 return 0;
6592 }
6593
db1ff28b
JK
6594 for (i = 0; i < num_phdr; i++)
6595 {
6596 if (is_elf64)
6597 {
6598 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6599
6600 if (p->p_type == PT_DYNAMIC)
6601 return p->p_vaddr + relocation;
6602 }
6603 else
6604 {
6605 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
2268b414 6606
db1ff28b
JK
6607 if (p->p_type == PT_DYNAMIC)
6608 return p->p_vaddr + relocation;
6609 }
6610 }
2268b414
JK
6611
6612 return 0;
6613}
6614
6615/* Return &_r_debug in the inferior, or -1 if not present. Return value
367ba2c2
MR
6616 can be 0 if the inferior does not yet have the library list initialized.
6617 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6618 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
2268b414
JK
6619
6620static CORE_ADDR
6621get_r_debug (const int pid, const int is_elf64)
6622{
6623 CORE_ADDR dynamic_memaddr;
6624 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6625 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
367ba2c2 6626 CORE_ADDR map = -1;
2268b414
JK
6627
6628 dynamic_memaddr = get_dynamic (pid, is_elf64);
6629 if (dynamic_memaddr == 0)
367ba2c2 6630 return map;
2268b414
JK
6631
6632 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6633 {
6634 if (is_elf64)
6635 {
6636 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
a738da3a 6637#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6638 union
6639 {
6640 Elf64_Xword map;
6641 unsigned char buf[sizeof (Elf64_Xword)];
6642 }
6643 rld_map;
a738da3a
MF
6644#endif
6645#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6646 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6647 {
6648 if (linux_read_memory (dyn->d_un.d_val,
6649 rld_map.buf, sizeof (rld_map.buf)) == 0)
6650 return rld_map.map;
6651 else
6652 break;
6653 }
75f62ce7 6654#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6655#ifdef DT_MIPS_RLD_MAP_REL
6656 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6657 {
6658 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6659 rld_map.buf, sizeof (rld_map.buf)) == 0)
6660 return rld_map.map;
6661 else
6662 break;
6663 }
6664#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6665
367ba2c2
MR
6666 if (dyn->d_tag == DT_DEBUG && map == -1)
6667 map = dyn->d_un.d_val;
2268b414
JK
6668
6669 if (dyn->d_tag == DT_NULL)
6670 break;
6671 }
6672 else
6673 {
6674 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
a738da3a 6675#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
367ba2c2
MR
6676 union
6677 {
6678 Elf32_Word map;
6679 unsigned char buf[sizeof (Elf32_Word)];
6680 }
6681 rld_map;
a738da3a
MF
6682#endif
6683#ifdef DT_MIPS_RLD_MAP
367ba2c2
MR
6684 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6685 {
6686 if (linux_read_memory (dyn->d_un.d_val,
6687 rld_map.buf, sizeof (rld_map.buf)) == 0)
6688 return rld_map.map;
6689 else
6690 break;
6691 }
75f62ce7 6692#endif /* DT_MIPS_RLD_MAP */
a738da3a
MF
6693#ifdef DT_MIPS_RLD_MAP_REL
6694 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6695 {
6696 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6697 rld_map.buf, sizeof (rld_map.buf)) == 0)
6698 return rld_map.map;
6699 else
6700 break;
6701 }
6702#endif /* DT_MIPS_RLD_MAP_REL */
2268b414 6703
367ba2c2
MR
6704 if (dyn->d_tag == DT_DEBUG && map == -1)
6705 map = dyn->d_un.d_val;
2268b414
JK
6706
6707 if (dyn->d_tag == DT_NULL)
6708 break;
6709 }
6710
6711 dynamic_memaddr += dyn_size;
6712 }
6713
367ba2c2 6714 return map;
2268b414
JK
6715}
6716
6717/* Read one pointer from MEMADDR in the inferior. */
6718
6719static int
6720read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6721{
485f1ee4
PA
6722 int ret;
6723
6724 /* Go through a union so this works on either big or little endian
6725 hosts, when the inferior's pointer size is smaller than the size
6726 of CORE_ADDR. It is assumed the inferior's endianness is the
6727 same of the superior's. */
6728 union
6729 {
6730 CORE_ADDR core_addr;
6731 unsigned int ui;
6732 unsigned char uc;
6733 } addr;
6734
6735 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6736 if (ret == 0)
6737 {
6738 if (ptr_size == sizeof (CORE_ADDR))
6739 *ptr = addr.core_addr;
6740 else if (ptr_size == sizeof (unsigned int))
6741 *ptr = addr.ui;
6742 else
6743 gdb_assert_not_reached ("unhandled pointer size");
6744 }
6745 return ret;
2268b414
JK
6746}
6747
/* Linux gdbserver always supports the qXfer:libraries-svr4:read
   packet; the transfer itself is implemented by
   qxfer_libraries_svr4 below.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6753
/* Per-ABI (32-bit vs 64-bit inferior) byte offsets of the fields of
   the dynamic linker's `struct r_debug' and `struct link_map' that
   are needed to walk the shared library list.  Concrete instances
   are defined in qxfer_libraries_svr4 below.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6777
/* Construct qXfer:libraries-svr4:read reply.

   ANNEX is a list of `name=address;' arguments; `start' and `prev'
   select the link_map entry to begin from and its expected
   predecessor.  The XML document describing the inferior's shared
   library list is built in full and the [OFFSET, OFFSET+LEN) slice
   is copied into READBUF.  Writing (WRITEBUF != NULL) is not
   supported.  Returns the number of bytes copied, -1 on error, or
   -2 for an unsupported request.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- or 64-bit link_map offsets based on the ELF class of
     the inferior's executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse the annex: a sequence of `name=address;' pairs.  Only
     `start' and `prev' are recognized; unknown names are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unrecognized argument: skip to the next `;'.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      /* No starting link_map given: locate the list head through
	 the cached (or freshly discovered) r_debug address.  */
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link_map list; stop on read failure or a broken
     back-link (list corruption).  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Return only the [offset, offset+len) window of the document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6967
9accd112
MM
6968#ifdef HAVE_LINUX_BTRACE
6969
/* See to_enable_btrace target method; delegates to the common
   linux_enable_btrace in nat/.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}
6976
969c39fb 6977/* See to_disable_btrace target method. */
9accd112 6978
79597bdd
TBA
6979int
6980linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
6981{
6982 enum btrace_error err;
6983
6984 err = linux_disable_btrace (tinfo);
6985 return (err == BTRACE_ERR_NONE ? 0 : -1);
6986}
6987
bc504a31 6988/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
6989
6990static void
6991linux_low_encode_pt_config (struct buffer *buffer,
6992 const struct btrace_data_pt_config *config)
6993{
6994 buffer_grow_str (buffer, "<pt-config>\n");
6995
6996 switch (config->cpu.vendor)
6997 {
6998 case CV_INTEL:
6999 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7000 "model=\"%u\" stepping=\"%u\"/>\n",
7001 config->cpu.family, config->cpu.model,
7002 config->cpu.stepping);
7003 break;
7004
7005 default:
7006 break;
7007 }
7008
7009 buffer_grow_str (buffer, "</pt-config>\n");
7010}
7011
7012/* Encode a raw buffer. */
7013
7014static void
7015linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7016 unsigned int size)
7017{
7018 if (size == 0)
7019 return;
7020
268a13a5 7021 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
7022 buffer_grow_str (buffer, "<raw>\n");
7023
7024 while (size-- > 0)
7025 {
7026 char elem[2];
7027
7028 elem[0] = tohex ((*data >> 4) & 0xf);
7029 elem[1] = tohex (*data++ & 0xf);
7030
7031 buffer_grow (buffer, elem, 2);
7032 }
7033
7034 buffer_grow_str (buffer, "</raw>\n");
7035}
7036
/* See to_read_btrace target method.

   Reads branch trace data of kind TYPE for TINFO and renders it as
   XML into BUFFER.  On failure an "E.<message>" string is stored in
   BUFFER and -1 is returned; on success returns 0.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Render the trace according to its format (BTS block list or raw
     Intel PT data).  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
/* See to_btrace_conf target method.

   Renders the branch trace configuration of TINFO as a
   <btrace-conf> XML document into BUFFER.  Always returns 0; a NULL
   configuration produces an empty document.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
9accd112
MM
7133#endif /* HAVE_LINUX_BTRACE */
7134
/* See nat/linux-nat.h.  Returns the ptid of gdbserver's current
   thread.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
7142
/* Return the name of thread THREAD, delegating to
   linux_proc_tid_get_name (presumably sourced from /proc — see
   nat/linux-procfs to confirm).  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
7148
#if USE_THREAD_DB
/* Fetch the libthread_db handle for PTID into *HANDLE/*HANDLE_LEN;
   only available when thread_db support is compiled in.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7157
276d4552
YQ
7158/* Default implementation of linux_target_ops method "set_pc" for
7159 32-bit pc register which is literally named "pc". */
7160
7161void
7162linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7163{
7164 uint32_t newpc = pc;
7165
7166 supply_register_by_name (regcache, "pc", &newpc);
7167}
7168
7169/* Default implementation of linux_target_ops method "get_pc" for
7170 32-bit pc register which is literally named "pc". */
7171
7172CORE_ADDR
7173linux_get_pc_32bit (struct regcache *regcache)
7174{
7175 uint32_t pc;
7176
7177 collect_register_by_name (regcache, "pc", &pc);
7178 if (debug_threads)
7179 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7180 return pc;
7181}
7182
6f69e520
YQ
7183/* Default implementation of linux_target_ops method "set_pc" for
7184 64-bit pc register which is literally named "pc". */
7185
7186void
7187linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7188{
7189 uint64_t newpc = pc;
7190
7191 supply_register_by_name (regcache, "pc", &newpc);
7192}
7193
7194/* Default implementation of linux_target_ops method "get_pc" for
7195 64-bit pc register which is literally named "pc". */
7196
7197CORE_ADDR
7198linux_get_pc_64bit (struct regcache *regcache)
7199{
7200 uint64_t pc;
7201
7202 collect_register_by_name (regcache, "pc", &pc);
7203 if (debug_threads)
7204 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7205 return pc;
7206}
7207
/* See linux-low.h.

   Scan the inferior's auxiliary vector for an entry whose tag equals
   MATCH; on success store its value in *VALP and return 1, otherwise
   return 0.  WORDSIZE is the inferior's word size in bytes (4 or 8);
   each auxv entry is a (tag, value) pair of that width.  */

int
linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
{
  /* One auxv entry: tag word followed by value word.  */
  gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
  int offset = 0;

  gdb_assert (wordsize == 4 || wordsize == 8);

  /* Read entry by entry until a short read signals the end.  */
  while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
    {
      if (wordsize == 4)
	{
	  uint32_t *data_p = (uint32_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}
      else
	{
	  uint64_t *data_p = (uint64_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}

      offset += 2 * wordsize;
    }

  return 0;
}
7244
7245/* See linux-low.h. */
7246
7247CORE_ADDR
7248linux_get_hwcap (int wordsize)
7249{
0570503d
PFC
7250 CORE_ADDR hwcap = 0;
7251 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7252 return hwcap;
974c89e0
AH
7253}
7254
7255/* See linux-low.h. */
7256
7257CORE_ADDR
7258linux_get_hwcap2 (int wordsize)
7259{
0570503d
PFC
7260 CORE_ADDR hwcap2 = 0;
7261 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7262 return hwcap2;
974c89e0 7263}
6f69e520 7264
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO->regsets — the array is terminated by a
   sentinel whose size is negative — and record the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7275
/* One-time initialization of the Linux low-level target: installs
   the target vector, emits any ptrace/proc capability warnings,
   installs the SIGCHLD handler, and runs architecture-specific
   setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so interrupted syscalls are resumed transparently
     when a child status change delivers SIGCHLD.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}
This page took 2.139994 seconds and 4 git commands to generate.