gdbserver/linux-low: turn 'emit_ops' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
b811d2c2 2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
69f4c9cc
AH
74#ifndef AT_HWCAP2
75#define AT_HWCAP2 26
76#endif
77
db0dfaa0
LM
78/* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81#if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84#if defined(__mcoldfire__)
85/* These are still undefined in 3.10 kernels. */
86#define PT_TEXT_ADDR 49*4
87#define PT_DATA_ADDR 50*4
88#define PT_TEXT_END_ADDR 51*4
89/* BFIN already defines these since at least 2.6.32 kernels. */
90#elif defined(BFIN)
91#define PT_TEXT_ADDR 220
92#define PT_TEXT_END_ADDR 224
93#define PT_DATA_ADDR 228
94/* These are still undefined in 3.10 kernels. */
95#elif defined(__TMS320C6X__)
96#define PT_TEXT_ADDR (0x10000*4)
97#define PT_DATA_ADDR (0x10004*4)
98#define PT_TEXT_END_ADDR (0x10008*4)
99#endif
100#endif
101
5203ae1e
TBA
102#if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107#define SUPPORTS_READ_OFFSETS
108#endif
109
9accd112 110#ifdef HAVE_LINUX_BTRACE
125f8a3d 111# include "nat/linux-btrace.h"
268a13a5 112# include "gdbsupport/btrace-common.h"
9accd112
MM
113#endif
114
8365dcf5
TJB
115#ifndef HAVE_ELF32_AUXV_T
116/* Copied from glibc's elf.h. */
117typedef struct
118{
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We use to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127} Elf32_auxv_t;
128#endif
129
130#ifndef HAVE_ELF64_AUXV_T
131/* Copied from glibc's elf.h. */
132typedef struct
133{
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We use to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142} Elf64_auxv_t;
143#endif
144
ded48a5e
YQ
145/* Does the current host support PTRACE_GETREGSET? */
146int have_ptrace_getregset = -1;
147
cff068da
GB
148/* LWP accessors. */
149
150/* See nat/linux-nat.h. */
151
152ptid_t
153ptid_of_lwp (struct lwp_info *lwp)
154{
155 return ptid_of (get_lwp_thread (lwp));
156}
157
158/* See nat/linux-nat.h. */
159
4b134ca1
GB
void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  /* Record the architecture-specific per-LWP data pointer.  */
  lwp->arch_private = info;
}
166
167/* See nat/linux-nat.h. */
168
struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  /* Accessor for the architecture-specific per-LWP data.  */
  return lwp->arch_private;
}
174
175/* See nat/linux-nat.h. */
176
cff068da
GB
int
lwp_is_stopped (struct lwp_info *lwp)
{
  /* Nonzero when the LWP is known to be stopped.  */
  return lwp->stopped;
}
182
183/* See nat/linux-nat.h. */
184
enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  /* Why the LWP last stopped (breakpoint, watchpoint, step, ...).  */
  return lwp->stop_reason;
}
190
0e00e962
AA
191/* See nat/linux-nat.h. */
192
int
lwp_is_stepping (struct lwp_info *lwp)
{
  /* Nonzero when the LWP is being single-stepped.  */
  return lwp->stepping;
}
198
05044653
PA
199/* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
24a09b5f 202
05044653
PA
struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stopped-but-unclaimed PIDs; entries are added
   and removed by add_to_pid_list/pull_pid_from_list.  */
struct simple_pid_list *stopped_pids;
215
216/* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219static void
220add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221{
8d749320 222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228}
229
230static int
231pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232{
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246}
24a09b5f 247
bde24c0a
PA
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
262
263/* FIXME make into a target method? */
24a09b5f 264int using_threads = 1;
24a09b5f 265
fa593d66
PA
266/* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268static int stabilizing_threads;
269
f50bf8e5 270static void unsuspend_all_lwps (struct lwp_info *except);
95954743 271static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 272static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 273static int finish_step_over (struct lwp_info *lwp);
d50171e4 274static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 275static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 276static int linux_low_ptrace_options (int attached);
ced2dffb 277static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 278
582511be
PA
279/* When the event-loop is doing a step-over, this points at the thread
280 being stepped. */
281ptid_t step_over_bkpt;
282
7d00775e 283/* True if the low target can hardware single-step. */
d50171e4
PA
284
285static int
286can_hardware_single_step (void)
287{
7d00775e
AT
288 if (the_low_target.supports_hardware_single_step != NULL)
289 return the_low_target.supports_hardware_single_step ();
290 else
291 return 0;
292}
293
bf9ae9d8
TBA
bool
linux_process_target::low_supports_breakpoints ()
{
  /* Base implementation: no breakpoint support; architecture targets
     override this when they can read/write the PC.  */
  return false;
}
d50171e4 299
bf9ae9d8
TBA
CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  /* Base implementation; meaningful only when an architecture
     overrides it alongside low_supports_breakpoints.  */
  return 0;
}
305
void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  /* Base implementation must never be reached; architectures that
     support breakpoints override it.  */
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
0d62e5e8 311
7582c77c
TBA
std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  /* Only targets implementing software single-step provide this.  */
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}
318
d4807ea2
TBA
int
linux_process_target::low_decr_pc_after_break ()
{
  /* Number of bytes the PC advances past a breakpoint instruction;
     default assumes the PC is left unmodified (see save_stop_reason,
     which subtracts this from the reported PC).  */
  return 0;
}
324
c2d6af84
PA
325/* True if LWP is stopped in its stepping range. */
326
327static int
328lwp_in_step_range (struct lwp_info *lwp)
329{
330 CORE_ADDR pc = lwp->stop_pc;
331
332 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
333}
334
0d62e5e8
DJ
/* A deferred signal together with its siginfo.  NOTE(review): the
   queueing/delivery logic lives outside this chunk — confirm usage
   against enqueue_pending_signal and friends.  */
struct pending_signals
{
  /* Signal number.  */
  int signal;
  /* Full siginfo captured when the signal was intercepted.  */
  siginfo_t info;
  /* Link to the previously queued entry.  */
  struct pending_signals *prev;
};
611cb4a5 341
bd99dc85
PA
342/* The read/write ends of the pipe registered as waitable file in the
343 event loop. */
344static int linux_event_pipe[2] = { -1, -1 };
345
346/* True if we're currently in async mode. */
347#define target_is_async_p() (linux_event_pipe[0] != -1)
348
02fc4de7 349static void send_sigstop (struct lwp_info *lwp);
bd99dc85 350
d0722149
DE
351/* Return non-zero if HEADER is a 64-bit ELF file. */
352
/* Return non-zero if HEADER is a 64-bit ELF file.  Stores the machine
   in *MACHINE (EM_NONE when HEADER is not ELF at all).  Note the
   non-ELF case returns -1, which callers treat as "inaccessible".  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* A valid ELF image starts with the four magic bytes "\177ELF".  */
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
368
369/* Return non-zero if FILE is a 64-bit ELF file,
370 zero if the file is not a 64-bit ELF file,
371 and -1 if the file is not accessible or doesn't exist. */
372
be07f1a2 373static int
214d508e 374elf_64_file_p (const char *file, unsigned int *machine)
d0722149 375{
957f3f49 376 Elf64_Ehdr header;
d0722149
DE
377 int fd;
378
379 fd = open (file, O_RDONLY);
380 if (fd < 0)
381 return -1;
382
383 if (read (fd, &header, sizeof (header)) != sizeof (header))
384 {
385 close (fd);
386 return 0;
387 }
388 close (fd);
389
214d508e 390 return elf_64_header_p (&header, machine);
d0722149
DE
391}
392
be07f1a2
PA
393/* Accepts an integer PID; Returns true if the executable PID is
394 running is a 64-bit ELF file.. */
395
396int
214d508e 397linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 398{
d8d2a3ee 399 char file[PATH_MAX];
be07f1a2
PA
400
401 sprintf (file, "/proc/%d/exe", pid);
214d508e 402 return elf_64_file_p (file, machine);
be07f1a2
PA
403}
404
fd000fb3
TBA
void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  /* Unregister the thread before tearing down its backing LWP.  */
  remove_thread (thr);

  /* Let the architecture release its private per-LWP data.  */
  low_delete_thread (lwp->arch_private);

  free (lwp);
}
419
fd000fb3
TBA
void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}
95954743 427
fd000fb3
TBA
428process_info *
429linux_process_target::add_linux_process (int pid, int attached)
95954743
PA
430{
431 struct process_info *proc;
432
95954743 433 proc = add_process (pid, attached);
8d749320 434 proc->priv = XCNEW (struct process_info_private);
95954743 435
fd000fb3 436 proc->priv->arch_private = low_new_process ();
aa5ca48f 437
95954743
PA
438 return proc;
439}
440
fd000fb3
TBA
arch_process_info *
linux_process_target::low_new_process ()
{
  /* Default: no architecture-private process data.  */
  return nullptr;
}
446
void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}
454
void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  Architectures with per-process state (e.g. debug registers)
     override this to copy it from PARENT to CHILD.  */
}
460
797bcff5
TBA
461void
462linux_process_target::arch_setup_thread (thread_info *thread)
94585166
DB
463{
464 struct thread_info *saved_thread;
465
466 saved_thread = current_thread;
467 current_thread = thread;
468
797bcff5 469 low_arch_setup ();
94585166
DB
470
471 current_thread = saved_thread;
472}
473
d16f3f6c
TBA
/* Handle a ptrace extended event (fork/vfork/clone/vfork-done/exec)
   reported for *ORIG_EVENT_LWP with wait status WSTAT.  Returns 0 if
   the event should be reported to GDB, 1 if it should be suppressed.
   May replace *ORIG_EVENT_LWP (on exec).  */

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  /* Give the child a copy of the parent's target description.  */
	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Reinsert the single-step breakpoints that were removed from
	 the parent when the vfork child was spawned.  */
      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
745
df95181f
TBA
746CORE_ADDR
747linux_process_target::get_pc (lwp_info *lwp)
d50171e4 748{
0bfdf32f 749 struct thread_info *saved_thread;
d50171e4
PA
750 struct regcache *regcache;
751 CORE_ADDR pc;
752
bf9ae9d8 753 if (!low_supports_breakpoints ())
d50171e4
PA
754 return 0;
755
0bfdf32f
GB
756 saved_thread = current_thread;
757 current_thread = get_lwp_thread (lwp);
d50171e4 758
0bfdf32f 759 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 760 pc = low_get_pc (regcache);
d50171e4
PA
761
762 if (debug_threads)
87ce2a04 763 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 764
0bfdf32f 765 current_thread = saved_thread;
d50171e4
PA
766 return pc;
767}
768
82075af2 769/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
4cc32bec 770 Fill *SYSNO with the syscall nr trapped. */
82075af2
JS
771
static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  /* Registers are read relative to the current thread; switch to the
     LWP's thread while reading, then restore.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}
797
df95181f
TBA
/* Determine why LWP last stopped (software/hardware breakpoint,
   watchpoint, or single-step), record it in LWP->stop_reason, and
   save the (possibly adjusted) stop PC in LWP->stop_pc.  Returns
   false when the target lacks breakpoint support.  */

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  /* On archs where the PC advances past the breakpoint instruction,
     the address of the breakpoint itself is PC minus that offset.  */
  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}
ce3a066d 932
fd000fb3
TBA
933lwp_info *
934linux_process_target::add_lwp (ptid_t ptid)
611cb4a5 935{
54a0b537 936 struct lwp_info *lwp;
0d62e5e8 937
8d749320 938 lwp = XCNEW (struct lwp_info);
00db26fa
PA
939
940 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 941
754e3168
AH
942 lwp->thread = add_thread (ptid, lwp);
943
fd000fb3 944 low_new_thread (lwp);
aa5ca48f 945
54a0b537 946 return lwp;
0d62e5e8 947}
611cb4a5 948
fd000fb3
TBA
void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  Architectures with per-thread private data override this.  */
}
954
2090129c
SDJ
955/* Callback to be used when calling fork_inferior, responsible for
956 actually initiating the tracing of the inferior. */
957
static void
linux_ptrace_fun ()
{
  /* Mark this (child) process as traced so the parent gdbserver
     becomes its tracer.  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* open() takes the lowest free descriptor, i.e. fd 0 (stdin),
	 which was just closed.  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
986
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* The scope block bounds the lifetime of the RAII object, so the
       personality (ASLR setting) is restored as soon as the fork has
       happened.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  /* Second argument 0: this process was launched, not attached to.  */
  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* The ptrace options can only be set once the child has stopped;
     post_create_inferior takes care of it.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
1021
/* Implement the post_create_inferior target_ops method.  Runs after
   the new inferior has reported its initial stop: sets up the
   architecture, then enables the ptrace event reporting options that
   create_inferior/attach_lwp deferred.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1040
/* Attach to the LWP specified by PTID with PTRACE_ATTACH and register
   it in the LWP list.  Returns 0 on success, or the errno set by
   PTRACE_ATTACH on failure.  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1122
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  /* Any other attach failure is reported, but does not abort
	     the walk over the remaining threads.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1167
500c1d85
PA
1168static void async_file_mark (void);
1169
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  /* Register the process first, so there is somewhere to hang the
     LWPs off of; it is removed again below if the attach fails.  */
  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* If the first stop was not the expected initial SIGSTOP, keep
	 the status so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1242
95954743 1243static int
e4eb0dec 1244last_thread_of_process_p (int pid)
95954743 1245{
e4eb0dec 1246 bool seen_one = false;
95954743 1247
da4ae14a 1248 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1249 {
e4eb0dec
SM
1250 if (!seen_one)
1251 {
1252 /* This is the first thread of this process we see. */
1253 seen_one = true;
1254 return false;
1255 }
1256 else
1257 {
1258 /* This is the second thread of this process we see. */
1259 return true;
1260 }
1261 });
da6d8c04 1262
e4eb0dec 1263 return thread == NULL;
95954743
PA
1264}
1265
/* Kill LWP, trying SIGKILL first and PTRACE_KILL second (see the
   comment below for why both are needed).  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1309
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD means the LWP is a clone child; retry with __WCLONE so
	 waitpid can see it.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1352
578290ec 1353/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1354 except the leader. */
95954743 1355
578290ec
SM
1356static void
1357kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1358{
54a0b537 1359 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1360
fd500816
DJ
1361 /* We avoid killing the first thread here, because of a Linux kernel (at
1362 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1363 the children get a chance to be reaped, it will remain a zombie
1364 forever. */
95954743 1365
d86d4aaf 1366 if (lwpid_of (thread) == pid)
95954743
PA
1367 {
1368 if (debug_threads)
87ce2a04 1369 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1370 target_pid_to_str (thread->id));
578290ec 1371 return;
95954743 1372 }
fd500816 1373
e76126e8 1374 kill_wait_lwp (lwp);
da6d8c04
DJ
1375}
1376
/* Kill the whole process PROCESS: every non-leader LWP first, then
   the leader, then mourn the process.  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1411
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   a host signal number (from WSTOPSIG), or 0 if no signal should be
   delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's pass/nopass signal configuration when it is known.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1492
/* Detach from a single LWP: cancel any pending SIGSTOP, compute the
   signal to deliver on detach, flush its registers, and PTRACE_DETACH
   it, handling the LWP having turned zombie meanwhile.  Deletes the
   lwp_info on the way out.  */

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1574
/* Detach from the whole process PROCESS: clone LWPs first, then the
   thread group leader, then mourn the process.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  /* Now that the clones are gone, detach from the leader itself.  */
  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1623
/* Clean up after PROCESS has gone away: notify thread_db, remove all
   of the process's LWPs from the lwp list, free the process's private
   data, and unregister the process itself.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  /* Let the low target free its architecture-specific part first.  */
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1648
95a49a39
TBA
1649void
1650linux_process_target::join (int pid)
444d6139 1651{
444d6139
PA
1652 int status, ret;
1653
1654 do {
d105de22 1655 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1656 if (WIFEXITED (status) || WIFSIGNALED (status))
1657 break;
1658 } while (ret != -1 || errno != ECHILD);
1659}
1660
13d3d99b
TBA
1661/* Return true if the given thread is still alive. */
1662
1663bool
1664linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1665{
95954743
PA
1666 struct lwp_info *lwp = find_lwp_pid (ptid);
1667
1668 /* We assume we always know if a thread exits. If a whole process
1669 exited but we still haven't been able to report it to GDB, we'll
1670 hold on to the last lwp of the dead process. */
1671 if (lwp != NULL)
00db26fa 1672 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1673 else
1674 return 0;
1675}
1676
df95181f
TBA
1677bool
1678linux_process_target::thread_still_has_status_pending (thread_info *thread)
582511be
PA
1679{
1680 struct lwp_info *lp = get_thread_lwp (thread);
1681
1682 if (!lp->status_pending_p)
1683 return 0;
1684
582511be 1685 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1686 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1687 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be
PA
1688 {
1689 struct thread_info *saved_thread;
1690 CORE_ADDR pc;
1691 int discard = 0;
1692
1693 gdb_assert (lp->last_status != 0);
1694
1695 pc = get_pc (lp);
1696
1697 saved_thread = current_thread;
1698 current_thread = thread;
1699
1700 if (pc != lp->stop_pc)
1701 {
1702 if (debug_threads)
1703 debug_printf ("PC of %ld changed\n",
1704 lwpid_of (thread));
1705 discard = 1;
1706 }
3e572f71
PA
1707
1708#if !USE_SIGTRAP_SIGINFO
15c66dd6 1709 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
d7146cda 1710 && !low_breakpoint_at (pc))
582511be
PA
1711 {
1712 if (debug_threads)
1713 debug_printf ("previous SW breakpoint of %ld gone\n",
1714 lwpid_of (thread));
1715 discard = 1;
1716 }
15c66dd6 1717 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1718 && !hardware_breakpoint_inserted_here (pc))
1719 {
1720 if (debug_threads)
1721 debug_printf ("previous HW breakpoint of %ld gone\n",
1722 lwpid_of (thread));
1723 discard = 1;
1724 }
3e572f71 1725#endif
582511be
PA
1726
1727 current_thread = saved_thread;
1728
1729 if (discard)
1730 {
1731 if (debug_threads)
1732 debug_printf ("discarding pending breakpoint status\n");
1733 lp->status_pending_p = 0;
1734 return 0;
1735 }
1736 }
1737
1738 return 1;
1739}
1740
a681f9c9
PA
1741/* Returns true if LWP is resumed from the client's perspective. */
1742
1743static int
1744lwp_resumed (struct lwp_info *lwp)
1745{
1746 struct thread_info *thread = get_lwp_thread (lwp);
1747
1748 if (thread->last_resume_kind != resume_stop)
1749 return 1;
1750
1751 /* Did gdb send us a `vCont;t', but we haven't reported the
1752 corresponding stop to gdb yet? If so, the thread is still
1753 resumed/running from gdb's perspective. */
1754 if (thread->last_resume_kind == resume_stop
1755 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1756 return 1;
1757
1758 return 0;
1759}
1760
df95181f
TBA
1761bool
1762linux_process_target::status_pending_p_callback (thread_info *thread,
1763 ptid_t ptid)
0d62e5e8 1764{
582511be 1765 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1766
1767 /* Check if we're only interested in events from a specific process
afa8d396 1768 or a specific LWP. */
83e1b6c1 1769 if (!thread->id.matches (ptid))
95954743 1770 return 0;
0d62e5e8 1771
a681f9c9
PA
1772 if (!lwp_resumed (lp))
1773 return 0;
1774
582511be 1775 if (lp->status_pending_p
df95181f 1776 && !thread_still_has_status_pending (thread))
582511be 1777 {
df95181f 1778 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1779 return 0;
1780 }
0d62e5e8 1781
582511be 1782 return lp->status_pending_p;
0d62e5e8
DJ
1783}
1784
95954743
PA
1785struct lwp_info *
1786find_lwp_pid (ptid_t ptid)
1787{
da4ae14a 1788 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1789 {
1790 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1791 return thr_arg->id.lwp () == lwp;
454296a2 1792 });
d86d4aaf
DE
1793
1794 if (thread == NULL)
1795 return NULL;
1796
9c80ecd6 1797 return get_thread_lwp (thread);
95954743
PA
1798}
1799
fa96cb38 1800/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1801
fa96cb38
PA
1802static int
1803num_lwps (int pid)
1804{
fa96cb38 1805 int count = 0;
0d62e5e8 1806
4d3bb80e
SM
1807 for_each_thread (pid, [&] (thread_info *thread)
1808 {
9c80ecd6 1809 count++;
4d3bb80e 1810 });
3aee8918 1811
fa96cb38
PA
1812 return count;
1813}
d61ddec4 1814
6d4ee8c6
GB
1815/* See nat/linux-nat.h. */
1816
1817struct lwp_info *
1818iterate_over_lwps (ptid_t filter,
d3a70e03 1819 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1820{
da4ae14a 1821 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1822 {
da4ae14a 1823 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1824
d3a70e03 1825 return callback (lwp);
6d1e5673 1826 });
6d4ee8c6 1827
9c80ecd6 1828 if (thread == NULL)
6d4ee8c6
GB
1829 return NULL;
1830
9c80ecd6 1831 return get_thread_lwp (thread);
6d4ee8c6
GB
1832}
1833
/* Delete the lwp_info of any thread group leader that has turned
   zombie while other threads of its group are still live (it exited,
   or another thread exec'd); its exit status cannot be reaped until
   the rest of the group is gone.  */

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}
c3adc08c 1892
a1385b7b
SM
1893/* Callback for `find_thread'. Returns the first LWP that is not
1894 stopped. */
d50171e4 1895
a1385b7b
SM
1896static bool
1897not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1898{
a1385b7b
SM
1899 if (!thread->id.matches (filter))
1900 return false;
47c0c975 1901
a1385b7b 1902 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1903
a1385b7b 1904 return !lwp->stopped;
0d62e5e8 1905}
611cb4a5 1906
863d01bd
PA
1907/* Increment LWP's suspend count. */
1908
1909static void
1910lwp_suspended_inc (struct lwp_info *lwp)
1911{
1912 lwp->suspended++;
1913
1914 if (debug_threads && lwp->suspended > 4)
1915 {
1916 struct thread_info *thread = get_lwp_thread (lwp);
1917
1918 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1919 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1920 }
1921}
1922
1923/* Decrement LWP's suspend count. */
1924
1925static void
1926lwp_suspended_decr (struct lwp_info *lwp)
1927{
1928 lwp->suspended--;
1929
1930 if (lwp->suspended < 0)
1931 {
1932 struct thread_info *thread = get_lwp_thread (lwp);
1933
1934 internal_error (__FILE__, __LINE__,
1935 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1936 lwp->suspended);
1937 }
1938}
1939
219f2f23
PA
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* Handle tracepoint bookkeeping breakpoints at the current PC.  */
  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Balance the lwp_suspended_inc above; the count must be back to
     zero, as asserted below.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1989
13e567af
TBA
/* Check whether LWP is stopped inside a fast tracepoint jump pad,
   collecting.  On a positive result, *STATUS is filled in with the
   collection details.  Returns not_collecting when the architecture
   provides no thread area (low_get_thread_area fails); otherwise
   delegates to fast_tracepoint_collecting using LWP's stop PC.  */

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
2006
13e567af
TBA
/* Base implementation of the low_get_thread_area hook.  Architectures
   that support fast tracepoints override this; the default simply
   reports failure by returning -1.  */

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}
2012
d16f3f6c
TBA
/* Check whether LWP is stopped inside a fast tracepoint jump pad and,
   if so, arrange for it to be moved out before its stop is reported.
   WSTAT is the wait status being considered, or NULL.  Returns true
   when the LWP should be kept collecting (the caller should not
   report the event yet); false when the event can be reported.
   Temporarily switches current_thread to LWP's thread.  */

bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  /* Only relevant for non-SIGTRAP stops (or no status at all), and
     only when fast tracepoints are in use with the agent loaded.  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the reported PC to the tracepoint address.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      /* All threads must be stopped while the breakpoint is
		 removed; pair with the unstop below.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return false;
}
2136
2137/* Enqueue one signal in the "signals to report later when out of the
2138 jump pad" list. */
2139
2140static void
2141enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2142{
2143 struct pending_signals *p_sig;
d86d4aaf 2144 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2145
2146 if (debug_threads)
87ce2a04 2147 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2148 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2149
2150 if (debug_threads)
2151 {
2152 struct pending_signals *sig;
2153
2154 for (sig = lwp->pending_signals_to_report;
2155 sig != NULL;
2156 sig = sig->prev)
87ce2a04
DE
2157 debug_printf (" Already queued %d\n",
2158 sig->signal);
fa593d66 2159
87ce2a04 2160 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2161 }
2162
1a981360
PA
2163 /* Don't enqueue non-RT signals if they are already in the deferred
2164 queue. (SIGSTOP being the easiest signal to see ending up here
2165 twice) */
2166 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2167 {
2168 struct pending_signals *sig;
2169
2170 for (sig = lwp->pending_signals_to_report;
2171 sig != NULL;
2172 sig = sig->prev)
2173 {
2174 if (sig->signal == WSTOPSIG (*wstat))
2175 {
2176 if (debug_threads)
87ce2a04
DE
2177 debug_printf ("Not requeuing already queued non-RT signal %d"
2178 " for LWP %ld\n",
2179 sig->signal,
d86d4aaf 2180 lwpid_of (thread));
1a981360
PA
2181 return;
2182 }
2183 }
2184 }
2185
8d749320 2186 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2187 p_sig->prev = lwp->pending_signals_to_report;
2188 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2189
d86d4aaf 2190 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2191 &p_sig->info);
fa593d66
PA
2192
2193 lwp->pending_signals_to_report = p_sig;
2194}
2195
2196/* Dequeue one signal from the "signals to report later when out of
2197 the jump pad" list. */
2198
2199static int
2200dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2201{
d86d4aaf
DE
2202 struct thread_info *thread = get_lwp_thread (lwp);
2203
fa593d66
PA
2204 if (lwp->pending_signals_to_report != NULL)
2205 {
2206 struct pending_signals **p_sig;
2207
2208 p_sig = &lwp->pending_signals_to_report;
2209 while ((*p_sig)->prev != NULL)
2210 p_sig = &(*p_sig)->prev;
2211
2212 *wstat = W_STOPCODE ((*p_sig)->signal);
2213 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2214 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2215 &(*p_sig)->info);
fa593d66
PA
2216 free (*p_sig);
2217 *p_sig = NULL;
2218
2219 if (debug_threads)
87ce2a04 2220 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2221 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2222
2223 if (debug_threads)
2224 {
2225 struct pending_signals *sig;
2226
2227 for (sig = lwp->pending_signals_to_report;
2228 sig != NULL;
2229 sig = sig->prev)
87ce2a04
DE
2230 debug_printf (" Still queued %d\n",
2231 sig->signal);
fa593d66 2232
87ce2a04 2233 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2234 }
2235
2236 return 1;
2237 }
2238
2239 return 0;
2240}
2241
ac1bbaca
TBA
2242bool
2243linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
582511be 2244{
ac1bbaca
TBA
2245 struct thread_info *saved_thread = current_thread;
2246 current_thread = get_lwp_thread (child);
d50171e4 2247
ac1bbaca
TBA
2248 if (low_stopped_by_watchpoint ())
2249 {
2250 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2251 child->stopped_data_address = low_stopped_data_address ();
2252 }
582511be 2253
ac1bbaca 2254 current_thread = saved_thread;
582511be 2255
ac1bbaca
TBA
2256 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2257}
d50171e4 2258
ac1bbaca
TBA
/* Base implementation of the low_stopped_by_watchpoint hook.
   Architectures with watchpoint support override this; the default
   reports "not stopped by a watchpoint".  */

bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}
d50171e4 2264
ac1bbaca
TBA
/* Base implementation of the low_stopped_data_address hook.
   Architectures with watchpoint support override this; the default
   returns address 0.  */

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}
2270
de0d863e
DB
/* Return the ptrace options that we want to try to enable.  ATTACHED
   is non-zero if we attached to an already-running inferior (rather
   than spawning it ourselves).  */

static int
linux_low_ptrace_options (int attached)
{
  client_state &cs = get_client_state ();
  int options = 0;

  /* If we spawned the inferior, make it die when gdbserver does.  */
  if (!attached)
    options |= PTRACE_O_EXITKILL;

  if (cs.report_fork_events)
    options |= PTRACE_O_TRACEFORK;

  if (cs.report_vfork_events)
    options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);

  if (cs.report_exec_events)
    options |= PTRACE_O_TRACEEXEC;

  /* Distinguish syscall stops from other SIGTRAP stops
     (SYSCALL_SIGTRAP).  */
  options |= PTRACE_O_TRACESYSGOOD;

  return options;
}
2295
d16f3f6c
TBA
/* Do low-level handling of one waitpid event for LWPID with wait
   status WSTAT.  Returns the LWP the event should be reported for, or
   NULL when the event was filtered out (ignored, handled internally,
   or left pending in some other form).  May switch current_thread.  */

lwp_info *
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_t (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  /* Enable the ptrace event-reporting options on the first stop of a
     newly seen LWP.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops alternate between entry and return.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  /* Handle SIGSTOPs we ourselves requested (stop_expected).  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2505
f79b145d
YQ
2506/* Return true if THREAD is doing hardware single step. */
2507
2508static int
2509maybe_hw_step (struct thread_info *thread)
2510{
2511 if (can_hardware_single_step ())
2512 return 1;
2513 else
2514 {
3b9a79ef 2515 /* GDBserver must insert single-step breakpoint for software
f79b145d 2516 single step. */
3b9a79ef 2517 gdb_assert (has_single_step_breakpoints (thread));
f79b145d
YQ
2518 return 0;
2519 }
2520}
2521
df95181f
TBA
2522void
2523linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2524{
20ba1ce6
PA
2525 struct lwp_info *lp = get_thread_lwp (thread);
2526
2527 if (lp->stopped
863d01bd 2528 && !lp->suspended
20ba1ce6 2529 && !lp->status_pending_p
20ba1ce6
PA
2530 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2531 {
8901d193
YQ
2532 int step = 0;
2533
2534 if (thread->last_resume_kind == resume_step)
2535 step = maybe_hw_step (thread);
20ba1ce6
PA
2536
2537 if (debug_threads)
2538 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2539 target_pid_to_str (ptid_of (thread)),
2540 paddress (lp->stop_pc),
2541 step);
2542
df95181f 2543 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2544 }
2545}
2546
d16f3f6c
TBA
/* Wait for an event from any resumed LWP matching WAIT_PTID, but only
   report statuses for LWPs matching FILTER_PTID.  Stores the wait
   status in *WSTATP.  OPTIONS are waitpid flags (WNOHANG honored).
   Returns the lwpid of the LWP whose event is reported (and makes its
   thread current), 0 if WNOHANG was set and no event is available, or
   -1 if there are no unwaited-for resumed LWPs left.  */

int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard or whole-process filter: pick a pending LWP at
	 random to avoid starvation.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (filter_ptid != null_ptid)
    {
      /* A specific LWP was requested.  */
      requested_child = find_lwp_pid (filter_ptid);

      /* If it is mid fast-tracepoint collect, defer its pending
	 signal and let it finish collecting first.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the original signal mask before returning.  */
  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}
2748
d16f3f6c
TBA
/* Convenience wrapper around wait_for_event_filtered, using PTID as
   both the wait set and the filter set.  */

int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2754
6bf5e0ba
PA
/* Select one LWP out of those that have events pending.  On return,
   *ORIG_LP is updated to point at the chosen LWP (it is left
   untouched if no thread with a pending event is found).  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
2811
7984d532
PA
2812/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2813 NULL. */
2814
2815static void
2816unsuspend_all_lwps (struct lwp_info *except)
2817{
139720c5
SM
2818 for_each_thread ([&] (thread_info *thread)
2819 {
2820 lwp_info *lwp = get_thread_lwp (thread);
2821
2822 if (lwp != except)
2823 lwp_suspended_decr (lwp);
2824 });
7984d532
PA
2825}
2826
5a6b0a41 2827static bool lwp_running (thread_info *thread);
fa593d66
PA
2828
2829/* Stabilize threads (move out of jump pads).
2830
2831 If a thread is midway collecting a fast tracepoint, we need to
2832 finish the collection and move it out of the jump pad before
2833 reporting the signal.
2834
2835 This avoids recursion while collecting (when a signal arrives
2836 midway, and the signal handler itself collects), which would trash
2837 the trace buffer. In case the user set a breakpoint in a signal
2838 handler, this avoids the backtrace showing the jump pad, etc..
2839 Most importantly, there are certain things we can't do safely if
2840 threads are stopped in a jump pad (or in its callee's). For
2841 example:
2842
2843 - starting a new trace run. A thread still collecting the
2844 previous run, could trash the trace buffer when resumed. The trace
2845 buffer control structures would have been reset but the thread had
2846 no way to tell. The thread could even midway memcpy'ing to the
2847 buffer, which would mean that when resumed, it would clobber the
2848 trace buffer that had been set for a new run.
2849
2850 - we can't rewrite/reuse the jump pads for new tracepoints
2851 safely. Say you do tstart while a thread is stopped midway while
2852 collecting. When the thread is later resumed, it finishes the
2853 collection, and returns to the jump pad, to execute the original
2854 instruction that was under the tracepoint jump at the time the
2855 older run had been started. If the jump pad had been rewritten
2856 since for something else in the new run, the thread would now
2857 execute the wrong / random instructions. */
2858
5c9eb2f2
TBA
void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already stuck in its jump pad, we cannot
     stabilize; report it and bail out rather than spin forever
     below.  */
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
			      {
				return stuck_in_jump_pad (thread);
			      });

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  /* wait_1 below switches current_thread; remember it so we can
     restore it before returning.  */
  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  /* Preserve any signal the stop delivered (or an explicit
	     stop request) so it is re-reported later.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the lwp_suspended_inc done in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      /* Sanity check: no thread should have ended up stuck in its
	 jump pad after stabilization.  */
      thread_stuck = find_thread ([this] (thread_info *thread)
			  {
			    return stuck_in_jump_pad (thread);
			  });

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2931
582511be
PA
2932/* Convenience function that is called when the kernel reports an
2933 event that is not passed out to GDB. */
2934
2935static ptid_t
2936ignore_event (struct target_waitstatus *ourstatus)
2937{
2938 /* If we got an event, there may still be others, as a single
2939 SIGCHLD can indicate more than one child stopped. This forces
2940 another target_wait call. */
2941 async_file_mark ();
2942
2943 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2944 return null_ptid;
2945}
2946
fd000fb3
TBA
2947ptid_t
2948linux_process_target::filter_exit_event (lwp_info *event_child,
2949 target_waitstatus *ourstatus)
65706a29 2950{
c12a5089 2951 client_state &cs = get_client_state ();
65706a29
PA
2952 struct thread_info *thread = get_lwp_thread (event_child);
2953 ptid_t ptid = ptid_of (thread);
2954
2955 if (!last_thread_of_process_p (pid_of (thread)))
2956 {
c12a5089 2957 if (cs.report_thread_events)
65706a29
PA
2958 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2959 else
2960 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2961
2962 delete_lwp (event_child);
2963 }
2964 return ptid;
2965}
2966
82075af2
JS
2967/* Returns 1 if GDB is interested in any event_child syscalls. */
2968
2969static int
2970gdb_catching_syscalls_p (struct lwp_info *event_child)
2971{
2972 struct thread_info *thread = get_lwp_thread (event_child);
2973 struct process_info *proc = get_thread_process (thread);
2974
f27866ba 2975 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2976}
2977
2978/* Returns 1 if GDB is interested in the event_child syscall.
2979 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
2980
2981static int
2982gdb_catch_this_syscall_p (struct lwp_info *event_child)
2983{
4cc32bec 2984 int sysno;
82075af2
JS
2985 struct thread_info *thread = get_lwp_thread (event_child);
2986 struct process_info *proc = get_thread_process (thread);
2987
f27866ba 2988 if (proc->syscalls_to_catch.empty ())
82075af2
JS
2989 return 0;
2990
f27866ba 2991 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
82075af2
JS
2992 return 1;
2993
4cc32bec 2994 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
2995
2996 for (int iter : proc->syscalls_to_catch)
82075af2
JS
2997 if (iter == sysno)
2998 return 1;
2999
3000 return 0;
3001}
3002
d16f3f6c
TBA
/* Core of the wait target op.  Wait for an event from an LWP matched
   by PTID (or, if a step-over is in progress, from the LWP being
   stepped over), decide whether GDB should see it, and fill in
   OURSTATUS for the event that is reported.  Returns the ptid of the
   reported LWP, or null_ptid when nothing is reported (WNOHANG with
   no event, or no resumed LWPs left).  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      int target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  An LWP with a pending status, or one
     that is not stopped, counts as resumed.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While stepping over a breakpoint, only events from the stepping
     LWP are interesting, and the wait must block until it reports.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall_p (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or may not support Z0 breakpoints.  If
     we do, we'll be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3735
3736/* Get rid of any pending event in the pipe. */
3737static void
3738async_file_flush (void)
3739{
3740 int ret;
3741 char buf;
3742
3743 do
3744 ret = read (linux_event_pipe[0], &buf, 1);
3745 while (ret >= 0 || (ret == -1 && errno == EINTR));
3746}
3747
3748/* Put something in the pipe, so the event loop wakes up. */
3749static void
3750async_file_mark (void)
3751{
3752 int ret;
3753
3754 async_file_flush ();
3755
3756 do
3757 ret = write (linux_event_pipe[1], "+", 1);
3758 while (ret == 0 || (ret == -1 && errno == EINTR));
3759
3760 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3761 be awakened anyway. */
3762}
3763
6532e7e3
TBA
3764ptid_t
3765linux_process_target::wait (ptid_t ptid,
3766 target_waitstatus *ourstatus,
3767 int target_options)
bd99dc85 3768{
95954743 3769 ptid_t event_ptid;
bd99dc85 3770
bd99dc85
PA
3771 /* Flush the async file first. */
3772 if (target_is_async_p ())
3773 async_file_flush ();
3774
582511be
PA
3775 do
3776 {
d16f3f6c 3777 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3778 }
3779 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3780 && event_ptid == null_ptid
582511be 3781 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3782
3783 /* If at least one stop was reported, there may be more. A single
3784 SIGCHLD can signal more than one child stop. */
3785 if (target_is_async_p ()
3786 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3787 && event_ptid != null_ptid)
bd99dc85
PA
3788 async_file_mark ();
3789
3790 return event_ptid;
da6d8c04
DJ
3791}
3792
c5f62d5f 3793/* Send a signal to an LWP. */
fd500816
DJ
3794
3795static int
a1928bad 3796kill_lwp (unsigned long lwpid, int signo)
fd500816 3797{
4a6ed09b 3798 int ret;
fd500816 3799
4a6ed09b
PA
3800 errno = 0;
3801 ret = syscall (__NR_tkill, lwpid, signo);
3802 if (errno == ENOSYS)
3803 {
3804 /* If tkill fails, then we are not using nptl threads, a
3805 configuration we no longer support. */
3806 perror_with_name (("tkill"));
3807 }
3808 return ret;
fd500816
DJ
3809}
3810
/* Request that LWP stop.  Exported entry point (used by shared
   linux-nat code); simply queues a SIGSTOP via send_sigstop.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3816
/* Send a SIGSTOP to LWP, unless one is already expected for it.
   Sets stop_expected so the eventual SIGSTOP event is recognized as
   ours rather than reported to GDB.  */

static void
send_sigstop (struct lwp_info *lwp)
{
  int pid;

  pid = lwpid_of (get_lwp_thread (lwp));

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Have pending sigstop for lwp %d\n", pid);

      return;
    }

  if (debug_threads)
    debug_printf ("Sending sigstop to lwp %d\n", pid);

  /* Mark the expectation before sending, so the stop is attributed
     to us even if the event arrives immediately.  */
  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}
3840
df3e4dbe
SM
3841static void
3842send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3843{
d86d4aaf 3844 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3845
7984d532
PA
3846 /* Ignore EXCEPT. */
3847 if (lwp == except)
df3e4dbe 3848 return;
7984d532 3849
02fc4de7 3850 if (lwp->stopped)
df3e4dbe 3851 return;
02fc4de7
PA
3852
3853 send_sigstop (lwp);
7984d532
PA
3854}
3855
/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  for_each_thread callback used by stop_all_lwps in
   stop-and-suspend mode; EXCEPT is left untouched.  */

static void
suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return;

  /* Bump the suspend count even if already stopped, so a matching
     unsuspend is required before the LWP may run again.  */
  lwp_suspended_inc (lwp);

  send_sigstop (thread, except);
}
3871
95954743
PA
3872static void
3873mark_lwp_dead (struct lwp_info *lwp, int wstat)
3874{
95954743
PA
3875 /* Store the exit status for later. */
3876 lwp->status_pending_p = 1;
3877 lwp->status_pending = wstat;
3878
00db26fa
PA
3879 /* Store in waitstatus as well, as there's nothing else to process
3880 for this event. */
3881 if (WIFEXITED (wstat))
3882 {
3883 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3884 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3885 }
3886 else if (WIFSIGNALED (wstat))
3887 {
3888 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3889 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3890 }
3891
95954743
PA
3892 /* Prevent trying to stop it. */
3893 lwp->stopped = 1;
3894
3895 /* No further stops are expected from a dead lwp. */
3896 lwp->stop_expected = 0;
3897}
3898
00db26fa
PA
3899/* Return true if LWP has exited already, and has a pending exit event
3900 to report to GDB. */
3901
3902static int
3903lwp_is_marked_dead (struct lwp_info *lwp)
3904{
3905 return (lwp->status_pending_p
3906 && (WIFEXITED (lwp->status_pending)
3907 || WIFSIGNALED (lwp->status_pending)));
3908}
3909
/* Wait until every LWP we previously sent a SIGSTOP to has reported
   its stop, leaving all events pending.  Restores current_thread
   afterwards, unless that thread died in the meantime.  */

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      current_thread = NULL;
    }
}
3946
/* Return true if THREAD is stopped inside a fast-tracepoint jump pad
   in a situation we should not disturb (GDB breakpoint there,
   watchpoint stop, or a pending single-step request).  THREAD must
   be stopped and not suspended.  */

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc..  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}
3969
/* If THREAD is stopped inside a fast-tracepoint jump pad and it is
   safe to do so, defer its pending signal and resume it so it can
   finish the collect and exit the pad; otherwise leave it suspended.
   THREAD must be stopped and not suspended on entry.  */

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  /* Park the pending signal; it will be re-delivered once the
	     LWP is out of the jump pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
4019
5a6b0a41
SM
4020static bool
4021lwp_running (thread_info *thread)
fa593d66 4022{
d86d4aaf 4023 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4024
00db26fa 4025 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4026 return false;
4027
4028 return !lwp->stopped;
fa593d66
PA
4029}
4030
/* Stop every LWP except EXCEPT, waiting until all stops have been
   collected.  If SUSPEND is non-zero, also increment each stopped
   LWP's suspend count.  Sets the global stopping_threads state for
   the duration; must not be entered recursively.  */

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every requested stop has been reported; events are
     left pending on the LWPs.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
4072
863d01bd
PA
4073/* Enqueue one signal in the chain of signals which need to be
4074 delivered to this process on next resume. */
4075
4076static void
4077enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4078{
8d749320 4079 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4080
863d01bd
PA
4081 p_sig->prev = lwp->pending_signals;
4082 p_sig->signal = signal;
4083 if (info == NULL)
4084 memset (&p_sig->info, 0, sizeof (siginfo_t));
4085 else
4086 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4087 lwp->pending_signals = p_sig;
4088}
4089
/* Install single-step breakpoints at every address LWP's next
   instruction may transfer control to, as computed by the low
   target's low_get_next_pcs.  Used on targets without hardware
   single-step.  */

void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore save_current_thread = make_scoped_restore (&current_thread);

  /* low_get_next_pcs and set_single_step_breakpoint operate on the
     current thread, so switch temporarily.  */
  current_thread = thread;
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}
4104
df95181f
TBA
4105int
4106linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4107{
4108 int step = 0;
4109
4110 if (can_hardware_single_step ())
4111 {
4112 step = 1;
4113 }
7582c77c 4114 else if (supports_software_single_step ())
7fe5e27e
AT
4115 {
4116 install_software_single_step_breakpoints (lwp);
4117 step = 0;
4118 }
4119 else
4120 {
4121 if (debug_threads)
4122 debug_printf ("stepping is not implemented on this target");
4123 }
4124
4125 return step;
4126}
4127
/* The signal can be delivered to the inferior if we are not trying to
   finish a fast tracepoint collect.  Since signal can be delivered in
   the step-over, the program may go to signal handler and trap again
   after return from the signal handler.  We can live with the spurious
   double traps.  */

static int
lwp_signal_can_be_delivered (struct lwp_info *lwp)
{
  /* Only when the LWP is not mid-collect may a signal be passed on.  */
  return (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting);
}
4140
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, deliver that signal (possibly deferring it if it
   cannot be delivered right now).  Throws on ptrace failure; see
   resume_one_lwp for the error-swallowing wrapper.  */

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      /* Remember the resume PC so a later stop can tell whether the
	 thread moved.  */
      lwp->stop_pc = low_get_pc (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      /* Walk to the tail of the chain: signals are delivered in the
	 order they were enqueued.  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  /* Give the low target a chance to flush per-thread state (e.g.
     debug registers) before the LWP runs.  */
  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4345
/* Default implementation of the low_prepare_to_resume hook, called
   right before an LWP is resumed.  Architecture backends override it
   (e.g. to write back pending debug-register changes); the base does
   nothing.  */

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}
4351
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}
4384
/* Like resume_one_lwp_throw, but swallows the ptrace error when the
   LWP turns out to be gone (zombie or exiting) -- its exit status
   will be collected shortly.  Any other error is re-thrown.  */

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }
}
4399
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* A stop request for a thread GDB already asked to stop is
	     redundant; keep scanning for a more specific match.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_resume_kind
			       == resume_step)
			      ? "stepping"
			      : "continuing",
			      lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork children that GDB
	     does not yet know are new fork children.  */
	  if (lwp->fork_relative != NULL)
	    {
	      struct lwp_info *rel = lwp->fork_relative;

	      if (rel->status_pending_p
		  && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
		      || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
		{
		  if (debug_threads)
		    debug_printf ("not resuming LWP %ld: has queued stop reply\n",
				  lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      if (debug_threads)
		debug_printf ("not resuming LWP %ld: has queued stop reply\n",
			      lwpid_of (thread));
	      continue;
	    }

	  /* Found the applicable request; record it on the LWP.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4511
df95181f
TBA
4512bool
4513linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4514{
d86d4aaf 4515 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4516
bd99dc85
PA
4517 /* LWPs which will not be resumed are not interesting, because
4518 we might not wait for them next time through linux_wait. */
2bd7c093 4519 if (lwp->resume == NULL)
25c28b4d 4520 return false;
64386c31 4521
df95181f 4522 return thread_still_has_status_pending (thread);
d50171e4
PA
4523}
4524
/* Return true if THREAD is stopped at a breakpoint that needs
   stepping over before it can be resumed, false otherwise (including
   when the thread isn't going to be resumed, has a pending status, or
   the breakpoint belongs to GDB and should simply be re-hit).  */

bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (supports_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* breakpoint_here and friends consult the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4655
/* Begin a step-over operation for LWP: stop all other threads,
   temporarily remove the breakpoint/tracepoint jump at its PC,
   single-step it past the instruction, and arrange for the next event
   to come from this LWP (step_over_bkpt).  finish_step_over undoes
   the removal.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Stop-and-suspend everything but LWP, so nothing else can trip on
     the momentarily-removed breakpoint.  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  /* Record where to reinsert, then lift the breakpoint/jump.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4702
4703/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3b9a79ef 4704 start_step_over, if still there, and delete any single-step
d50171e4
PA
4705 breakpoints we've set, on non hardware single-step targets. */
4706
4707static int
4708finish_step_over (struct lwp_info *lwp)
4709{
4710 if (lwp->bp_reinsert != 0)
4711 {
f79b145d
YQ
4712 struct thread_info *saved_thread = current_thread;
4713
d50171e4 4714 if (debug_threads)
87ce2a04 4715 debug_printf ("Finished step over.\n");
d50171e4 4716
f79b145d
YQ
4717 current_thread = get_lwp_thread (lwp);
4718
d50171e4
PA
4719 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4720 may be no breakpoint to reinsert there by now. */
4721 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4722 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4723
4724 lwp->bp_reinsert = 0;
4725
3b9a79ef
YQ
4726 /* Delete any single-step breakpoints. No longer needed. We
4727 don't have to worry about other threads hitting this trap,
4728 and later not being able to explain it, because we were
4729 stepping over a breakpoint, and we hold all threads but
4730 LWP stopped while doing that. */
d50171e4 4731 if (!can_hardware_single_step ())
f79b145d 4732 {
3b9a79ef
YQ
4733 gdb_assert (has_single_step_breakpoints (current_thread));
4734 delete_single_step_breakpoints (current_thread);
f79b145d 4735 }
d50171e4
PA
4736
4737 step_over_bkpt = null_ptid;
f79b145d 4738 current_thread = saved_thread;
d50171e4
PA
4739 return 1;
4740 }
4741 else
4742 return 0;
4743}
4744
d16f3f6c
TBA
4745void
4746linux_process_target::complete_ongoing_step_over ()
863d01bd 4747{
d7e15655 4748 if (step_over_bkpt != null_ptid)
863d01bd
PA
4749 {
4750 struct lwp_info *lwp;
4751 int wstat;
4752 int ret;
4753
4754 if (debug_threads)
4755 debug_printf ("detach: step over in progress, finish it first\n");
4756
4757 /* Passing NULL_PTID as filter indicates we want all events to
4758 be left pending. Eventually this returns when there are no
4759 unwaited-for children left. */
d16f3f6c
TBA
4760 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4761 __WALL);
863d01bd
PA
4762 gdb_assert (ret == -1);
4763
4764 lwp = find_lwp_pid (step_over_bkpt);
4765 if (lwp != NULL)
4766 finish_step_over (lwp);
4767 step_over_bkpt = null_ptid;
4768 unsuspend_all_lwps (lwp);
4769 }
4770}
4771
df95181f
TBA
4772void
4773linux_process_target::resume_one_thread (thread_info *thread,
4774 bool leave_all_stopped)
5544ad89 4775{
d86d4aaf 4776 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4 4777 int leave_pending;
5544ad89 4778
2bd7c093 4779 if (lwp->resume == NULL)
c80825ff 4780 return;
5544ad89 4781
bd99dc85 4782 if (lwp->resume->kind == resume_stop)
5544ad89 4783 {
bd99dc85 4784 if (debug_threads)
d86d4aaf 4785 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
bd99dc85
PA
4786
4787 if (!lwp->stopped)
4788 {
4789 if (debug_threads)
d86d4aaf 4790 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
bd99dc85 4791
d50171e4
PA
4792 /* Stop the thread, and wait for the event asynchronously,
4793 through the event loop. */
02fc4de7 4794 send_sigstop (lwp);
bd99dc85
PA
4795 }
4796 else
4797 {
4798 if (debug_threads)
87ce2a04 4799 debug_printf ("already stopped LWP %ld\n",
d86d4aaf 4800 lwpid_of (thread));
d50171e4
PA
4801
4802 /* The LWP may have been stopped in an internal event that
4803 was not meant to be notified back to GDB (e.g., gdbserver
4804 breakpoint), so we should be reporting a stop event in
4805 this case too. */
4806
4807 /* If the thread already has a pending SIGSTOP, this is a
4808 no-op. Otherwise, something later will presumably resume
4809 the thread and this will cause it to cancel any pending
4810 operation, due to last_resume_kind == resume_stop. If
4811 the thread already has a pending status to report, we
4812 will still report it the next time we wait - see
4813 status_pending_p_callback. */
1a981360
PA
4814
4815 /* If we already have a pending signal to report, then
4816 there's no need to queue a SIGSTOP, as this means we're
4817 midway through moving the LWP out of the jumppad, and we
4818 will report the pending signal as soon as that is
4819 finished. */
4820 if (lwp->pending_signals_to_report == NULL)
4821 send_sigstop (lwp);
bd99dc85 4822 }
32ca6d61 4823
bd99dc85
PA
4824 /* For stop requests, we're done. */
4825 lwp->resume = NULL;
fc7238bb 4826 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
c80825ff 4827 return;
5544ad89
DJ
4828 }
4829
bd99dc85 4830 /* If this thread which is about to be resumed has a pending status,
863d01bd
PA
4831 then don't resume it - we can just report the pending status.
4832 Likewise if it is suspended, because e.g., another thread is
4833 stepping past a breakpoint. Make sure to queue any signals that
4834 would otherwise be sent. In all-stop mode, we do this decision
4835 based on if *any* thread has a pending status. If there's a
4836 thread that needs the step-over-breakpoint dance, then don't
4837 resume any other thread but that particular one. */
4838 leave_pending = (lwp->suspended
4839 || lwp->status_pending_p
4840 || leave_all_stopped);
5544ad89 4841
0e9a339e
YQ
4842 /* If we have a new signal, enqueue the signal. */
4843 if (lwp->resume->sig != 0)
4844 {
4845 siginfo_t info, *info_p;
4846
4847 /* If this is the same signal we were previously stopped by,
4848 make sure to queue its siginfo. */
4849 if (WIFSTOPPED (lwp->last_status)
4850 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4851 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4852 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4853 info_p = &info;
4854 else
4855 info_p = NULL;
4856
4857 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4858 }
4859
d50171e4 4860 if (!leave_pending)
bd99dc85
PA
4861 {
4862 if (debug_threads)
d86d4aaf 4863 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5544ad89 4864
9c80ecd6 4865 proceed_one_lwp (thread, NULL);
bd99dc85
PA
4866 }
4867 else
4868 {
4869 if (debug_threads)
d86d4aaf 4870 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
bd99dc85 4871 }
5544ad89 4872
fc7238bb 4873 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 4874 lwp->resume = NULL;
0d62e5e8
DJ
4875}
4876
0e4d7e35
TBA
4877void
4878linux_process_target::resume (thread_resume *resume_info, size_t n)
0d62e5e8 4879{
d86d4aaf 4880 struct thread_info *need_step_over = NULL;
c6ecbae5 4881
87ce2a04
DE
4882 if (debug_threads)
4883 {
4884 debug_enter ();
4885 debug_printf ("linux_resume:\n");
4886 }
4887
5fdda392
SM
4888 for_each_thread ([&] (thread_info *thread)
4889 {
4890 linux_set_resume_request (thread, resume_info, n);
4891 });
5544ad89 4892
d50171e4
PA
4893 /* If there is a thread which would otherwise be resumed, which has
4894 a pending status, then don't resume any threads - we can just
4895 report the pending status. Make sure to queue any signals that
4896 would otherwise be sent. In non-stop mode, we'll apply this
4897 logic to each thread individually. We consume all pending events
4898 before considering to start a step-over (in all-stop). */
25c28b4d 4899 bool any_pending = false;
bd99dc85 4900 if (!non_stop)
df95181f
TBA
4901 any_pending = find_thread ([this] (thread_info *thread)
4902 {
4903 return resume_status_pending (thread);
4904 }) != nullptr;
d50171e4
PA
4905
4906 /* If there is a thread which would otherwise be resumed, which is
4907 stopped at a breakpoint that needs stepping over, then don't
4908 resume any threads - have it step over the breakpoint with all
4909 other threads stopped, then resume all threads again. Make sure
4910 to queue any signals that would otherwise be delivered or
4911 queued. */
bf9ae9d8 4912 if (!any_pending && low_supports_breakpoints ())
df95181f
TBA
4913 need_step_over = find_thread ([this] (thread_info *thread)
4914 {
4915 return thread_needs_step_over (thread);
4916 });
d50171e4 4917
c80825ff 4918 bool leave_all_stopped = (need_step_over != NULL || any_pending);
d50171e4
PA
4919
4920 if (debug_threads)
4921 {
4922 if (need_step_over != NULL)
87ce2a04 4923 debug_printf ("Not resuming all, need step over\n");
d50171e4 4924 else if (any_pending)
87ce2a04
DE
4925 debug_printf ("Not resuming, all-stop and found "
4926 "an LWP with pending status\n");
d50171e4 4927 else
87ce2a04 4928 debug_printf ("Resuming, no pending status or step over needed\n");
d50171e4
PA
4929 }
4930
4931 /* Even if we're leaving threads stopped, queue all signals we'd
4932 otherwise deliver. */
c80825ff
SM
4933 for_each_thread ([&] (thread_info *thread)
4934 {
df95181f 4935 resume_one_thread (thread, leave_all_stopped);
c80825ff 4936 });
d50171e4
PA
4937
4938 if (need_step_over)
d86d4aaf 4939 start_step_over (get_thread_lwp (need_step_over));
87ce2a04
DE
4940
4941 if (debug_threads)
4942 {
4943 debug_printf ("linux_resume done\n");
4944 debug_exit ();
4945 }
1bebeeca
PA
4946
4947 /* We may have events that were pending that can/should be sent to
4948 the client now. Trigger a linux_wait call. */
4949 if (target_is_async_p ())
4950 async_file_mark ();
d50171e4
PA
4951}
4952
df95181f
TBA
4953void
4954linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
d50171e4 4955{
d86d4aaf 4956 struct lwp_info *lwp = get_thread_lwp (thread);
d50171e4
PA
4957 int step;
4958
7984d532 4959 if (lwp == except)
e2b44075 4960 return;
d50171e4
PA
4961
4962 if (debug_threads)
d86d4aaf 4963 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
d50171e4
PA
4964
4965 if (!lwp->stopped)
4966 {
4967 if (debug_threads)
d86d4aaf 4968 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
e2b44075 4969 return;
d50171e4
PA
4970 }
4971
02fc4de7
PA
4972 if (thread->last_resume_kind == resume_stop
4973 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
4974 {
4975 if (debug_threads)
87ce2a04 4976 debug_printf (" client wants LWP to remain %ld stopped\n",
d86d4aaf 4977 lwpid_of (thread));
e2b44075 4978 return;
d50171e4
PA
4979 }
4980
4981 if (lwp->status_pending_p)
4982 {
4983 if (debug_threads)
87ce2a04 4984 debug_printf (" LWP %ld has pending status, leaving stopped\n",
d86d4aaf 4985 lwpid_of (thread));
e2b44075 4986 return;
d50171e4
PA
4987 }
4988
7984d532
PA
4989 gdb_assert (lwp->suspended >= 0);
4990
d50171e4
PA
4991 if (lwp->suspended)
4992 {
4993 if (debug_threads)
d86d4aaf 4994 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
e2b44075 4995 return;
d50171e4
PA
4996 }
4997
1a981360
PA
4998 if (thread->last_resume_kind == resume_stop
4999 && lwp->pending_signals_to_report == NULL
229d26fc
SM
5000 && (lwp->collecting_fast_tracepoint
5001 == fast_tpoint_collect_result::not_collecting))
02fc4de7
PA
5002 {
5003 /* We haven't reported this LWP as stopped yet (otherwise, the
5004 last_status.kind check above would catch it, and we wouldn't
5005 reach here. This LWP may have been momentarily paused by a
5006 stop_all_lwps call while handling for example, another LWP's
5007 step-over. In that case, the pending expected SIGSTOP signal
5008 that was queued at vCont;t handling time will have already
5009 been consumed by wait_for_sigstop, and so we need to requeue
5010 another one here. Note that if the LWP already has a SIGSTOP
5011 pending, this is a no-op. */
5012
5013 if (debug_threads)
87ce2a04
DE
5014 debug_printf ("Client wants LWP %ld to stop. "
5015 "Making sure it has a SIGSTOP pending\n",
d86d4aaf 5016 lwpid_of (thread));
02fc4de7
PA
5017
5018 send_sigstop (lwp);
5019 }
5020
863d01bd
PA
5021 if (thread->last_resume_kind == resume_step)
5022 {
5023 if (debug_threads)
5024 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5025 lwpid_of (thread));
8901d193 5026
3b9a79ef 5027 /* If resume_step is requested by GDB, install single-step
8901d193 5028 breakpoints when the thread is about to be actually resumed if
3b9a79ef 5029 the single-step breakpoints weren't removed. */
7582c77c 5030 if (supports_software_single_step ()
3b9a79ef 5031 && !has_single_step_breakpoints (thread))
8901d193
YQ
5032 install_software_single_step_breakpoints (lwp);
5033
5034 step = maybe_hw_step (thread);
863d01bd
PA
5035 }
5036 else if (lwp->bp_reinsert != 0)
5037 {
5038 if (debug_threads)
5039 debug_printf (" stepping LWP %ld, reinsert set\n",
5040 lwpid_of (thread));
f79b145d
YQ
5041
5042 step = maybe_hw_step (thread);
863d01bd
PA
5043 }
5044 else
5045 step = 0;
5046
df95181f 5047 resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
5048}
5049
df95181f
TBA
5050void
5051linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5052 lwp_info *except)
7984d532 5053{
d86d4aaf 5054 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5055
5056 if (lwp == except)
e2b44075 5057 return;
7984d532 5058
863d01bd 5059 lwp_suspended_decr (lwp);
7984d532 5060
e2b44075 5061 proceed_one_lwp (thread, except);
d50171e4
PA
5062}
5063
d16f3f6c
TBA
5064void
5065linux_process_target::proceed_all_lwps ()
d50171e4 5066{
d86d4aaf 5067 struct thread_info *need_step_over;
d50171e4
PA
5068
5069 /* If there is a thread which would otherwise be resumed, which is
5070 stopped at a breakpoint that needs stepping over, then don't
5071 resume any threads - have it step over the breakpoint with all
5072 other threads stopped, then resume all threads again. */
5073
bf9ae9d8 5074 if (low_supports_breakpoints ())
d50171e4 5075 {
df95181f
TBA
5076 need_step_over = find_thread ([this] (thread_info *thread)
5077 {
5078 return thread_needs_step_over (thread);
5079 });
d50171e4
PA
5080
5081 if (need_step_over != NULL)
5082 {
5083 if (debug_threads)
87ce2a04
DE
5084 debug_printf ("proceed_all_lwps: found "
5085 "thread %ld needing a step-over\n",
5086 lwpid_of (need_step_over));
d50171e4 5087
d86d4aaf 5088 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
5089 return;
5090 }
5091 }
5544ad89 5092
d50171e4 5093 if (debug_threads)
87ce2a04 5094 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 5095
df95181f 5096 for_each_thread ([this] (thread_info *thread)
e2b44075
SM
5097 {
5098 proceed_one_lwp (thread, NULL);
5099 });
d50171e4
PA
5100}
5101
d16f3f6c
TBA
5102void
5103linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
d50171e4 5104{
5544ad89
DJ
5105 if (debug_threads)
5106 {
87ce2a04 5107 debug_enter ();
d50171e4 5108 if (except)
87ce2a04 5109 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 5110 lwpid_of (get_lwp_thread (except)));
5544ad89 5111 else
87ce2a04 5112 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
5113 }
5114
7984d532 5115 if (unsuspend)
e2b44075
SM
5116 for_each_thread ([&] (thread_info *thread)
5117 {
5118 unsuspend_and_proceed_one_lwp (thread, except);
5119 });
7984d532 5120 else
e2b44075
SM
5121 for_each_thread ([&] (thread_info *thread)
5122 {
5123 proceed_one_lwp (thread, except);
5124 });
87ce2a04
DE
5125
5126 if (debug_threads)
5127 {
5128 debug_printf ("unstop_all_lwps done\n");
5129 debug_exit ();
5130 }
0d62e5e8
DJ
5131}
5132
58caa3dc
DJ
5133
5134#ifdef HAVE_LINUX_REGSETS
5135
1faeff08
MR
5136#define use_linux_regsets 1
5137
030031ee
PA
5138/* Returns true if REGSET has been disabled. */
5139
5140static int
5141regset_disabled (struct regsets_info *info, struct regset_info *regset)
5142{
5143 return (info->disabled_regsets != NULL
5144 && info->disabled_regsets[regset - info->regsets]);
5145}
5146
5147/* Disable REGSET. */
5148
5149static void
5150disable_regset (struct regsets_info *info, struct regset_info *regset)
5151{
5152 int dr_offset;
5153
5154 dr_offset = regset - info->regsets;
5155 if (info->disabled_regsets == NULL)
224c3ddb 5156 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5157 info->disabled_regsets[dr_offset] = 1;
5158}
5159
58caa3dc 5160static int
3aee8918
PA
5161regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5162 struct regcache *regcache)
58caa3dc
DJ
5163{
5164 struct regset_info *regset;
e9d25b98 5165 int saw_general_regs = 0;
95954743 5166 int pid;
1570b33e 5167 struct iovec iov;
58caa3dc 5168
0bfdf32f 5169 pid = lwpid_of (current_thread);
28eef672 5170 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5171 {
1570b33e
L
5172 void *buf, *data;
5173 int nt_type, res;
58caa3dc 5174
030031ee 5175 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5176 continue;
58caa3dc 5177
bca929d3 5178 buf = xmalloc (regset->size);
1570b33e
L
5179
5180 nt_type = regset->nt_type;
5181 if (nt_type)
5182 {
5183 iov.iov_base = buf;
5184 iov.iov_len = regset->size;
5185 data = (void *) &iov;
5186 }
5187 else
5188 data = buf;
5189
dfb64f85 5190#ifndef __sparc__
f15f9948 5191 res = ptrace (regset->get_request, pid,
b8e1b30e 5192 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5193#else
1570b33e 5194 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5195#endif
58caa3dc
DJ
5196 if (res < 0)
5197 {
1ef53e6b
AH
5198 if (errno == EIO
5199 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5200 {
1ef53e6b
AH
5201 /* If we get EIO on a regset, or an EINVAL and the regset is
5202 optional, do not try it again for this process mode. */
030031ee 5203 disable_regset (regsets_info, regset);
58caa3dc 5204 }
e5a9158d
AA
5205 else if (errno == ENODATA)
5206 {
5207 /* ENODATA may be returned if the regset is currently
5208 not "active". This can happen in normal operation,
5209 so suppress the warning in this case. */
5210 }
fcd4a73d
YQ
5211 else if (errno == ESRCH)
5212 {
5213 /* At this point, ESRCH should mean the process is
5214 already gone, in which case we simply ignore attempts
5215 to read its registers. */
5216 }
58caa3dc
DJ
5217 else
5218 {
0d62e5e8 5219 char s[256];
95954743
PA
5220 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5221 pid);
0d62e5e8 5222 perror (s);
58caa3dc
DJ
5223 }
5224 }
098dbe61
AA
5225 else
5226 {
5227 if (regset->type == GENERAL_REGS)
5228 saw_general_regs = 1;
5229 regset->store_function (regcache, buf);
5230 }
fdeb2a12 5231 free (buf);
58caa3dc 5232 }
e9d25b98
DJ
5233 if (saw_general_regs)
5234 return 0;
5235 else
5236 return 1;
58caa3dc
DJ
5237}
5238
5239static int
3aee8918
PA
5240regsets_store_inferior_registers (struct regsets_info *regsets_info,
5241 struct regcache *regcache)
58caa3dc
DJ
5242{
5243 struct regset_info *regset;
e9d25b98 5244 int saw_general_regs = 0;
95954743 5245 int pid;
1570b33e 5246 struct iovec iov;
58caa3dc 5247
0bfdf32f 5248 pid = lwpid_of (current_thread);
28eef672 5249 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5250 {
1570b33e
L
5251 void *buf, *data;
5252 int nt_type, res;
58caa3dc 5253
feea5f36
AA
5254 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5255 || regset->fill_function == NULL)
28eef672 5256 continue;
58caa3dc 5257
bca929d3 5258 buf = xmalloc (regset->size);
545587ee
DJ
5259
5260 /* First fill the buffer with the current register set contents,
5261 in case there are any items in the kernel's regset that are
5262 not in gdbserver's regcache. */
1570b33e
L
5263
5264 nt_type = regset->nt_type;
5265 if (nt_type)
5266 {
5267 iov.iov_base = buf;
5268 iov.iov_len = regset->size;
5269 data = (void *) &iov;
5270 }
5271 else
5272 data = buf;
5273
dfb64f85 5274#ifndef __sparc__
f15f9948 5275 res = ptrace (regset->get_request, pid,
b8e1b30e 5276 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5277#else
689cc2ae 5278 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5279#endif
545587ee
DJ
5280
5281 if (res == 0)
5282 {
5283 /* Then overlay our cached registers on that. */
442ea881 5284 regset->fill_function (regcache, buf);
545587ee
DJ
5285
5286 /* Only now do we write the register set. */
dfb64f85 5287#ifndef __sparc__
f15f9948 5288 res = ptrace (regset->set_request, pid,
b8e1b30e 5289 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5290#else
1570b33e 5291 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 5292#endif
545587ee
DJ
5293 }
5294
58caa3dc
DJ
5295 if (res < 0)
5296 {
1ef53e6b
AH
5297 if (errno == EIO
5298 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5299 {
1ef53e6b
AH
5300 /* If we get EIO on a regset, or an EINVAL and the regset is
5301 optional, do not try it again for this process mode. */
030031ee 5302 disable_regset (regsets_info, regset);
58caa3dc 5303 }
3221518c
UW
5304 else if (errno == ESRCH)
5305 {
1b3f6016
PA
5306 /* At this point, ESRCH should mean the process is
5307 already gone, in which case we simply ignore attempts
5308 to change its registers. See also the related
df95181f 5309 comment in resume_one_lwp. */
fdeb2a12 5310 free (buf);
3221518c
UW
5311 return 0;
5312 }
58caa3dc
DJ
5313 else
5314 {
ce3a066d 5315 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
5316 }
5317 }
e9d25b98
DJ
5318 else if (regset->type == GENERAL_REGS)
5319 saw_general_regs = 1;
09ec9b38 5320 free (buf);
58caa3dc 5321 }
e9d25b98
DJ
5322 if (saw_general_regs)
5323 return 0;
5324 else
5325 return 1;
58caa3dc
DJ
5326}
5327
1faeff08 5328#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5329
1faeff08 5330#define use_linux_regsets 0
3aee8918
PA
5331#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5332#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5333
58caa3dc 5334#endif
1faeff08
MR
5335
5336/* Return 1 if register REGNO is supported by one of the regset ptrace
5337 calls or 0 if it has to be transferred individually. */
5338
5339static int
3aee8918 5340linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5341{
5342 unsigned char mask = 1 << (regno % 8);
5343 size_t index = regno / 8;
5344
5345 return (use_linux_regsets
3aee8918
PA
5346 && (regs_info->regset_bitmap == NULL
5347 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5348}
5349
58caa3dc 5350#ifdef HAVE_LINUX_USRREGS
1faeff08 5351
5b3da067 5352static int
3aee8918 5353register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5354{
5355 int addr;
5356
3aee8918 5357 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5358 error ("Invalid register number %d.", regnum);
5359
3aee8918 5360 addr = usrregs->regmap[regnum];
1faeff08
MR
5361
5362 return addr;
5363}
5364
daca57a7
TBA
5365
5366void
5367linux_process_target::fetch_register (const usrregs_info *usrregs,
5368 regcache *regcache, int regno)
1faeff08
MR
5369{
5370 CORE_ADDR regaddr;
5371 int i, size;
5372 char *buf;
5373 int pid;
5374
3aee8918 5375 if (regno >= usrregs->num_regs)
1faeff08 5376 return;
daca57a7 5377 if (low_cannot_fetch_register (regno))
1faeff08
MR
5378 return;
5379
3aee8918 5380 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5381 if (regaddr == -1)
5382 return;
5383
3aee8918
PA
5384 size = ((register_size (regcache->tdesc, regno)
5385 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5386 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5387 buf = (char *) alloca (size);
1faeff08 5388
0bfdf32f 5389 pid = lwpid_of (current_thread);
1faeff08
MR
5390 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5391 {
5392 errno = 0;
5393 *(PTRACE_XFER_TYPE *) (buf + i) =
5394 ptrace (PTRACE_PEEKUSER, pid,
5395 /* Coerce to a uintptr_t first to avoid potential gcc warning
5396 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5397 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
1faeff08
MR
5398 regaddr += sizeof (PTRACE_XFER_TYPE);
5399 if (errno != 0)
9a70f35c
YQ
5400 {
5401 /* Mark register REGNO unavailable. */
5402 supply_register (regcache, regno, NULL);
5403 return;
5404 }
1faeff08
MR
5405 }
5406
b35db733 5407 low_supply_ptrace_register (regcache, regno, buf);
1faeff08
MR
5408}
5409
daca57a7
TBA
5410void
5411linux_process_target::store_register (const usrregs_info *usrregs,
5412 regcache *regcache, int regno)
1faeff08
MR
5413{
5414 CORE_ADDR regaddr;
5415 int i, size;
5416 char *buf;
5417 int pid;
5418
3aee8918 5419 if (regno >= usrregs->num_regs)
1faeff08 5420 return;
daca57a7 5421 if (low_cannot_store_register (regno))
1faeff08
MR
5422 return;
5423
3aee8918 5424 regaddr = register_addr (usrregs, regno);
1faeff08
MR
5425 if (regaddr == -1)
5426 return;
5427
3aee8918
PA
5428 size = ((register_size (regcache->tdesc, regno)
5429 + sizeof (PTRACE_XFER_TYPE) - 1)
1faeff08 5430 & -sizeof (PTRACE_XFER_TYPE));
224c3ddb 5431 buf = (char *) alloca (size);
1faeff08
MR
5432 memset (buf, 0, size);
5433
b35db733 5434 low_collect_ptrace_register (regcache, regno, buf);
1faeff08 5435
0bfdf32f 5436 pid = lwpid_of (current_thread);
1faeff08
MR
5437 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5438 {
5439 errno = 0;
5440 ptrace (PTRACE_POKEUSER, pid,
5441 /* Coerce to a uintptr_t first to avoid potential gcc warning
5442 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5443 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5444 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
1faeff08
MR
5445 if (errno != 0)
5446 {
5447 /* At this point, ESRCH should mean the process is
5448 already gone, in which case we simply ignore attempts
5449 to change its registers. See also the related
df95181f 5450 comment in resume_one_lwp. */
1faeff08
MR
5451 if (errno == ESRCH)
5452 return;
5453
daca57a7
TBA
5454
5455 if (!low_cannot_store_register (regno))
6d91ce9a 5456 error ("writing register %d: %s", regno, safe_strerror (errno));
1faeff08
MR
5457 }
5458 regaddr += sizeof (PTRACE_XFER_TYPE);
5459 }
5460}
daca57a7 5461#endif /* HAVE_LINUX_USRREGS */
1faeff08 5462
b35db733
TBA
5463void
5464linux_process_target::low_collect_ptrace_register (regcache *regcache,
5465 int regno, char *buf)
5466{
5467 collect_register (regcache, regno, buf);
5468}
5469
5470void
5471linux_process_target::low_supply_ptrace_register (regcache *regcache,
5472 int regno, const char *buf)
5473{
5474 supply_register (regcache, regno, buf);
5475}
5476
daca57a7
TBA
5477void
5478linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5479 regcache *regcache,
5480 int regno, int all)
1faeff08 5481{
daca57a7 5482#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5483 struct usrregs_info *usr = regs_info->usrregs;
5484
1faeff08
MR
5485 if (regno == -1)
5486 {
3aee8918
PA
5487 for (regno = 0; regno < usr->num_regs; regno++)
5488 if (all || !linux_register_in_regsets (regs_info, regno))
5489 fetch_register (usr, regcache, regno);
1faeff08
MR
5490 }
5491 else
3aee8918 5492 fetch_register (usr, regcache, regno);
daca57a7 5493#endif
1faeff08
MR
5494}
5495
daca57a7
TBA
5496void
5497linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5498 regcache *regcache,
5499 int regno, int all)
1faeff08 5500{
daca57a7 5501#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5502 struct usrregs_info *usr = regs_info->usrregs;
5503
1faeff08
MR
5504 if (regno == -1)
5505 {
3aee8918
PA
5506 for (regno = 0; regno < usr->num_regs; regno++)
5507 if (all || !linux_register_in_regsets (regs_info, regno))
5508 store_register (usr, regcache, regno);
1faeff08
MR
5509 }
5510 else
3aee8918 5511 store_register (usr, regcache, regno);
58caa3dc 5512#endif
daca57a7 5513}
1faeff08 5514
a5a4d4cd
TBA
5515void
5516linux_process_target::fetch_registers (regcache *regcache, int regno)
1faeff08
MR
5517{
5518 int use_regsets;
5519 int all = 0;
aa8d21c9 5520 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5521
5522 if (regno == -1)
5523 {
bd70b1f2 5524 if (regs_info->usrregs != NULL)
3aee8918 5525 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
bd70b1f2 5526 low_fetch_register (regcache, regno);
c14dfd32 5527
3aee8918
PA
5528 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5529 if (regs_info->usrregs != NULL)
5530 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
1faeff08
MR
5531 }
5532 else
5533 {
bd70b1f2 5534 if (low_fetch_register (regcache, regno))
c14dfd32
PA
5535 return;
5536
3aee8918 5537 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5538 if (use_regsets)
3aee8918
PA
5539 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5540 regcache);
5541 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5542 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5543 }
58caa3dc
DJ
5544}
5545
a5a4d4cd
TBA
5546void
5547linux_process_target::store_registers (regcache *regcache, int regno)
58caa3dc 5548{
1faeff08
MR
5549 int use_regsets;
5550 int all = 0;
aa8d21c9 5551 const regs_info *regs_info = get_regs_info ();
1faeff08
MR
5552
5553 if (regno == -1)
5554 {
3aee8918
PA
5555 all = regsets_store_inferior_registers (regs_info->regsets_info,
5556 regcache);
5557 if (regs_info->usrregs != NULL)
5558 usr_store_inferior_registers (regs_info, regcache, regno, all);
1faeff08
MR
5559 }
5560 else
5561 {
3aee8918 5562 use_regsets = linux_register_in_regsets (regs_info, regno);
1faeff08 5563 if (use_regsets)
3aee8918
PA
5564 all = regsets_store_inferior_registers (regs_info->regsets_info,
5565 regcache);
5566 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5567 usr_store_inferior_registers (regs_info, regcache, regno, 1);
1faeff08 5568 }
58caa3dc
DJ
5569}
5570
bd70b1f2
TBA
5571bool
5572linux_process_target::low_fetch_register (regcache *regcache, int regno)
5573{
5574 return false;
5575}
da6d8c04 5576
e2558df3 5577/* A wrapper for the read_memory target op. */
da6d8c04 5578
c3e735a6 5579static int
f450004a 5580linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
e2558df3 5581{
52405d85 5582 return the_target->read_memory (memaddr, myaddr, len);
e2558df3
TBA
5583}
5584
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or a
   non-zero errno value on failure.  Tries /proc/PID/mem first (one
   syscall for the whole range) and falls back to word-at-a-time
   PTRACE_PEEKTEXT.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  PTRACE_PEEKTEXT returns the data in its
     return value, so errors must be detected via errno.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer, trimming the leading
     bytes added by rounding ADDR down and any unread tail.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5673
93ae6fdc
PA
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.

   ptrace can only transfer whole words, so this does a
   read-modify-write: the first and last words of the affected range
   are read back first so the bytes outside [MEMADDR, MEMADDR+LEN) are
   preserved.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9 5765
2a31c7aa
TBA
5766void
5767linux_process_target::look_up_symbols ()
2f2893d9 5768{
0d62e5e8 5769#ifdef USE_THREAD_DB
95954743
PA
5770 struct process_info *proc = current_process ();
5771
fe978cb0 5772 if (proc->priv->thread_db != NULL)
0d62e5e8
DJ
5773 return;
5774
9b4c5f87 5775 thread_db_init ();
0d62e5e8
DJ
5776#endif
5777}
5778
eb497a2a
TBA
5779void
5780linux_process_target::request_interrupt ()
e5379b03 5781{
78708b7c
PA
5782 /* Send a SIGINT to the process group. This acts just like the user
5783 typed a ^C on the controlling terminal. */
eb497a2a 5784 ::kill (-signal_pid, SIGINT);
e5379b03
DJ
5785}
5786
eac215cc
TBA
5787bool
5788linux_process_target::supports_read_auxv ()
5789{
5790 return true;
5791}
5792
aa691b87
RM
5793/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5794 to debugger memory starting at MYADDR. */
5795
eac215cc
TBA
5796int
5797linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5798 unsigned int len)
aa691b87
RM
5799{
5800 char filename[PATH_MAX];
5801 int fd, n;
0bfdf32f 5802 int pid = lwpid_of (current_thread);
aa691b87 5803
6cebaf6e 5804 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5805
5806 fd = open (filename, O_RDONLY);
5807 if (fd < 0)
5808 return -1;
5809
5810 if (offset != (CORE_ADDR) 0
5811 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5812 n = -1;
5813 else
5814 n = read (fd, myaddr, len);
5815
5816 close (fd);
5817
5818 return n;
5819}
5820
7e0bde70
TBA
5821int
5822linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5823 int size, raw_breakpoint *bp)
e013ee27 5824{
c8f4bfdd
YQ
5825 if (type == raw_bkpt_type_sw)
5826 return insert_memory_breakpoint (bp);
e013ee27 5827 else
9db9aa23
TBA
5828 return low_insert_point (type, addr, size, bp);
5829}
5830
5831int
5832linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5833 int size, raw_breakpoint *bp)
5834{
5835 /* Unsupported (see target.h). */
5836 return 1;
e013ee27
OF
5837}
5838
7e0bde70
TBA
5839int
5840linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5841 int size, raw_breakpoint *bp)
e013ee27 5842{
c8f4bfdd
YQ
5843 if (type == raw_bkpt_type_sw)
5844 return remove_memory_breakpoint (bp);
e013ee27 5845 else
9db9aa23
TBA
5846 return low_remove_point (type, addr, size, bp);
5847}
5848
5849int
5850linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5851 int size, raw_breakpoint *bp)
5852{
5853 /* Unsupported (see target.h). */
5854 return 1;
e013ee27
OF
5855}
5856
84320c4e 5857/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5858 method. */
5859
84320c4e
TBA
5860bool
5861linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5862{
5863 struct lwp_info *lwp = get_thread_lwp (current_thread);
5864
5865 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5866}
5867
84320c4e 5868/* Implement the supports_stopped_by_sw_breakpoint target_ops
3e572f71
PA
5869 method. */
5870
84320c4e
TBA
5871bool
5872linux_process_target::supports_stopped_by_sw_breakpoint ()
3e572f71
PA
5873{
5874 return USE_SIGTRAP_SIGINFO;
5875}
5876
93fe88b2 5877/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5878 method. */
5879
93fe88b2
TBA
5880bool
5881linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5882{
5883 struct lwp_info *lwp = get_thread_lwp (current_thread);
5884
5885 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5886}
5887
93fe88b2 5888/* Implement the supports_stopped_by_hw_breakpoint target_ops
3e572f71
PA
5889 method. */
5890
93fe88b2
TBA
5891bool
5892linux_process_target::supports_stopped_by_hw_breakpoint ()
3e572f71
PA
5893{
5894 return USE_SIGTRAP_SIGINFO;
5895}
5896
70b90b91 5897/* Implement the supports_hardware_single_step target_ops method. */
45614f15 5898
22aa6223
TBA
5899bool
5900linux_process_target::supports_hardware_single_step ()
45614f15 5901{
45614f15
YQ
5902 return can_hardware_single_step ();
5903}
5904
6eeb5c55
TBA
5905bool
5906linux_process_target::stopped_by_watchpoint ()
e013ee27 5907{
0bfdf32f 5908 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5909
15c66dd6 5910 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5911}
5912
6eeb5c55
TBA
5913CORE_ADDR
5914linux_process_target::stopped_data_address ()
e013ee27 5915{
0bfdf32f 5916 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5917
5918 return lwp->stopped_data_address;
e013ee27
OF
5919}
5920
db0dfaa0
LM
5921/* This is only used for targets that define PT_TEXT_ADDR,
5922 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5923 the target has different ways of acquiring this information, like
5924 loadmaps. */
52fb6437 5925
5203ae1e
TBA
5926bool
5927linux_process_target::supports_read_offsets ()
5928{
5929#ifdef SUPPORTS_READ_OFFSETS
5930 return true;
5931#else
5932 return false;
5933#endif
5934}
5935
52fb6437
NS
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text and data load addresses
   with PTRACE_PEEKUSER and returns them via TEXT_P and DATA_P.
   Returns 1 on success, 0 on failure.  Only compiled in when
   SUPPORTS_READ_OFFSETS is defined.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER returns the value directly; errors only show up
     in errno, so clear it first.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5976
6e3fd7e9
TBA
5977bool
5978linux_process_target::supports_get_tls_address ()
5979{
5980#ifdef USE_THREAD_DB
5981 return true;
5982#else
5983 return false;
5984#endif
5985}
5986
5987int
5988linux_process_target::get_tls_address (thread_info *thread,
5989 CORE_ADDR offset,
5990 CORE_ADDR load_module,
5991 CORE_ADDR *address)
5992{
5993#ifdef USE_THREAD_DB
5994 return thread_db_get_tls_address (thread, offset, load_module, address);
5995#else
5996 return -1;
5997#endif
5998}
5999
2d0795ee
TBA
6000bool
6001linux_process_target::supports_qxfer_osdata ()
6002{
6003 return true;
6004}
6005
6006int
6007linux_process_target::qxfer_osdata (const char *annex,
6008 unsigned char *readbuf,
6009 unsigned const char *writebuf,
6010 CORE_ADDR offset, int len)
07e059b5 6011{
d26e3629 6012 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
6013}
6014
cb63de7c
TBA
6015void
6016linux_process_target::siginfo_fixup (siginfo_t *siginfo,
6017 gdb_byte *inf_siginfo, int direction)
d0722149 6018{
cb63de7c 6019 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
d0722149
DE
6020
6021 /* If there was no callback, or the callback didn't do anything,
6022 then just do a straight memcpy. */
6023 if (!done)
6024 {
6025 if (direction == 1)
a5362b9a 6026 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6027 else
a5362b9a 6028 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6029 }
6030}
6031
cb63de7c
TBA
6032bool
6033linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
6034 int direction)
6035{
6036 return false;
6037}
6038
d7abedf7
TBA
bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}

/* Read or write the siginfo of the current LWP.  Exactly one of
   READBUF/WRITEBUF is non-NULL.  Returns the number of bytes
   transferred, or -1 on error.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the size of the (converted) siginfo.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6095
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.

   Runs in signal context: only async-signal-safe functions may be
   called, and errno must be preserved for the interrupted code.  */

static void
sigchld_handler (int signo)
{
  /* Save errno; the calls below may clobber it.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6121
0dc587d4
TBA
6122bool
6123linux_process_target::supports_non_stop ()
bd99dc85 6124{
0dc587d4 6125 return true;
bd99dc85
PA
6126}
6127
0dc587d4
TBA
/* Switch async (non-stop) mode on or off.  Returns the previous
   state.  The event pipe is created or torn down with SIGCHLD blocked
   so the sigchld_handler never sees a half-initialized pipe.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while manipulating the event pipe.  */
      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Both ends non-blocking: the handler must never stall.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6182
0dc587d4
TBA
6183int
6184linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
6185{
6186 /* Register or unregister from event-loop accordingly. */
0dc587d4 6187 target_async (nonstop);
aa96c426 6188
0dc587d4 6189 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
6190 return -1;
6191
bd99dc85
PA
6192 return 0;
6193}
6194
652aef77
TBA
6195bool
6196linux_process_target::supports_multi_process ()
cf8fd78b 6197{
652aef77 6198 return true;
cf8fd78b
PA
6199}
6200
89245bc0
DB
6201/* Check if fork events are supported. */
6202
9690a72a
TBA
6203bool
6204linux_process_target::supports_fork_events ()
89245bc0
DB
6205{
6206 return linux_supports_tracefork ();
6207}
6208
6209/* Check if vfork events are supported. */
6210
9690a72a
TBA
6211bool
6212linux_process_target::supports_vfork_events ()
89245bc0
DB
6213{
6214 return linux_supports_tracefork ();
6215}
6216
94585166
DB
6217/* Check if exec events are supported. */
6218
9690a72a
TBA
6219bool
6220linux_process_target::supports_exec_events ()
94585166
DB
6221{
6222 return linux_supports_traceexec ();
6223}
6224
de0d863e
DB
6225/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6226 ptrace flags for all inferiors. This is in case the new GDB connection
6227 doesn't support the same set of events that the previous one did. */
6228
fb00dfce
TBA
6229void
6230linux_process_target::handle_new_gdb_connection ()
de0d863e 6231{
de0d863e 6232 /* Request that all the lwps reset their ptrace options. */
bbf550d5
SM
6233 for_each_thread ([] (thread_info *thread)
6234 {
6235 struct lwp_info *lwp = get_thread_lwp (thread);
6236
6237 if (!lwp->stopped)
6238 {
6239 /* Stop the lwp so we can modify its ptrace options. */
6240 lwp->must_set_ptrace_flags = 1;
6241 linux_stop_lwp (lwp);
6242 }
6243 else
6244 {
6245 /* Already stopped; go ahead and set the ptrace options. */
6246 struct process_info *proc = find_process_pid (pid_of (thread));
6247 int options = linux_low_ptrace_options (proc->attached);
6248
6249 linux_enable_event_reporting (lwpid_of (thread), options);
6250 lwp->must_set_ptrace_flags = 0;
6251 }
6252 });
de0d863e
DB
6253}
6254
55cf3021
TBA
6255int
6256linux_process_target::handle_monitor_command (char *mon)
6257{
6258#ifdef USE_THREAD_DB
6259 return thread_db_handle_monitor_command (mon);
6260#else
6261 return 0;
6262#endif
6263}
6264
95a45fc1
TBA
6265int
6266linux_process_target::core_of_thread (ptid_t ptid)
6267{
6268 return linux_common_core_of_thread (ptid);
6269}
6270
c756403b
TBA
6271bool
6272linux_process_target::supports_disable_randomization ()
03583c20
UW
6273{
6274#ifdef HAVE_PERSONALITY
c756403b 6275 return true;
03583c20 6276#else
c756403b 6277 return false;
03583c20
UW
6278#endif
6279}
efcbbd14 6280
c0245cb9
TBA
6281bool
6282linux_process_target::supports_agent ()
d1feda86 6283{
c0245cb9 6284 return true;
d1feda86
YQ
6285}
6286
2526e0cd
TBA
6287bool
6288linux_process_target::supports_range_stepping ()
c2d6af84 6289{
7582c77c 6290 if (supports_software_single_step ())
2526e0cd 6291 return true;
c2d6af84 6292 if (*the_low_target.supports_range_stepping == NULL)
2526e0cd 6293 return false;
c2d6af84
PA
6294
6295 return (*the_low_target.supports_range_stepping) ();
6296}
6297
8247b823
TBA
6298bool
6299linux_process_target::supports_pid_to_exec_file ()
6300{
6301 return true;
6302}
6303
6304char *
6305linux_process_target::pid_to_exec_file (int pid)
6306{
6307 return linux_proc_pid_to_exec_file (pid);
6308}
6309
c9b7b804
TBA
6310bool
6311linux_process_target::supports_multifs ()
6312{
6313 return true;
6314}
6315
6316int
6317linux_process_target::multifs_open (int pid, const char *filename,
6318 int flags, mode_t mode)
6319{
6320 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6321}
6322
6323int
6324linux_process_target::multifs_unlink (int pid, const char *filename)
6325{
6326 return linux_mntns_unlink (pid, filename);
6327}
6328
6329ssize_t
6330linux_process_target::multifs_readlink (int pid, const char *filename,
6331 char *buf, size_t bufsiz)
6332{
6333 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6334}
6335
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One entry of a no-MMU load map, as reported by the kernel.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
/* DSBT (e.g. TIC6X) flavor of the load map.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
/* FDPIC (e.g. FR-V, Blackfin) flavor of the load map.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 6377
9da41fda
TBA
bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

/* Read part of the no-MMU load map for ANNEX ("exec" or "interp")
   into MYADDR.  Returns the number of bytes copied, or -1 on error.
   DATA points into tracee-kernel-provided storage; it is not owned by
   us and must not be freed.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  /* Copy at most LEN bytes of whatever remains past OFFSET.  */
  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b 6416#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6417
bc8d3ae4
TBA
6418bool
6419linux_process_target::supports_catch_syscall ()
82075af2
JS
6420{
6421 return (the_low_target.get_syscall_trapinfo != NULL
6422 && linux_supports_tracesysgood ());
6423}
6424
d633e831
TBA
6425int
6426linux_process_target::get_ipa_tdesc_idx ()
ae91f625
MK
6427{
6428 if (the_low_target.get_ipa_tdesc_idx == NULL)
6429 return 0;
6430
6431 return (*the_low_target.get_ipa_tdesc_idx) ();
6432}
6433
770d8f6a
TBA
6434CORE_ADDR
6435linux_process_target::read_pc (regcache *regcache)
219f2f23 6436{
bf9ae9d8 6437 if (!low_supports_breakpoints ())
219f2f23
PA
6438 return 0;
6439
bf9ae9d8 6440 return low_get_pc (regcache);
219f2f23
PA
6441}
6442
770d8f6a
TBA
6443void
6444linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
219f2f23 6445{
bf9ae9d8 6446 gdb_assert (low_supports_breakpoints ());
219f2f23 6447
bf9ae9d8 6448 low_set_pc (regcache, pc);
219f2f23
PA
6449}
6450
68119632
TBA
6451bool
6452linux_process_target::supports_thread_stopped ()
6453{
6454 return true;
6455}
6456
6457bool
6458linux_process_target::thread_stopped (thread_info *thread)
8336d594
PA
6459{
6460 return get_thread_lwp (thread)->stopped;
6461}
6462
6463/* This exposes stop-all-threads functionality to other modules. */
6464
29e8dc09
TBA
6465void
6466linux_process_target::pause_all (bool freeze)
8336d594 6467{
7984d532
PA
6468 stop_all_lwps (freeze, NULL);
6469}
6470
6471/* This exposes unstop-all-threads functionality to other gdbserver
6472 modules. */
6473
29e8dc09
TBA
6474void
6475linux_process_target::unpause_all (bool unfreeze)
7984d532
PA
6476{
6477 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6478}
6479
79b44087
TBA
6480int
6481linux_process_target::prepare_to_access_memory ()
90d74c30
PA
6482{
6483 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6484 running LWP. */
6485 if (non_stop)
29e8dc09 6486 target_pause_all (true);
90d74c30
PA
6487 return 0;
6488}
6489
79b44087
TBA
6490void
6491linux_process_target::done_accessing_memory ()
90d74c30
PA
6492{
6493 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6494 running LWP. */
6495 if (non_stop)
29e8dc09 6496 target_unpause_all (true);
90d74c30
PA
6497}
6498
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.
   Scans /proc/PID/auxv for AT_PHDR and AT_PHNUM; IS_ELF64 selects the
   auxv entry width.  Returns 1 when the file cannot be opened, 2 when
   either value is missing.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  /* Stop as soon as both values have been found.  */
  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6564
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   The load bias (relocation) is recovered by comparing the runtime
   AT_PHDR address against the PT_PHDR program header's p_vaddr.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* With the bias known, locate PT_DYNAMIC and relocate its vaddr.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6638
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.

   Walks the inferior's .dynamic array entry by entry until DT_NULL.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP's value is a pointer to the map address.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the .dynamic entry.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6740
6741/* Read one pointer from MEMADDR in the inferior. */
6742
6743static int
6744read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6745{
485f1ee4
PA
6746 int ret;
6747
6748 /* Go through a union so this works on either big or little endian
6749 hosts, when the inferior's pointer size is smaller than the size
6750 of CORE_ADDR. It is assumed the inferior's endianness is the
6751 same of the superior's. */
6752 union
6753 {
6754 CORE_ADDR core_addr;
6755 unsigned int ui;
6756 unsigned char uc;
6757 } addr;
6758
6759 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6760 if (ret == 0)
6761 {
6762 if (ptr_size == sizeof (CORE_ADDR))
6763 *ptr = addr.core_addr;
6764 else if (ptr_size == sizeof (unsigned int))
6765 *ptr = addr.ui;
6766 else
6767 gdb_assert_not_reached ("unhandled pointer size");
6768 }
6769 return ret;
2268b414
JK
6770}
6771
974387bb
TBA
bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  /* The Linux target implements qXfer:libraries-svr4:read below.  */
  return true;
}
6777
2268b414
JK
/* Byte offsets into the inferior's SVR4 `struct r_debug' and
   `struct link_map', which differ between 32-bit and 64-bit layouts.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6801
/* Construct qXfer:libraries-svr4:read reply.  Returns the number of
   bytes written into READBUF, -1 on error, or -2 if a write was
   requested (this object is read-only).  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  /* Hard-coded SVR4 layout for 32-bit inferiors.  */
  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  /* Hard-coded SVR4 layout for 64-bit inferiors.  */
  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the layout from the ELF class of the main executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse the annex, a ';'-separated list of "name=hexaddr" pairs.
     Recognized names are "start" and "prev"; others are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  /* Unknown key: skip past the next ';' and continue.  */
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No explicit start address: locate the head of the link map via
     r_debug.r_map.  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link map, emitting one <library> element per entry.  The
     loop stops on a broken list (prev mismatch) or any read failure.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Serve the slice [OFFSET, OFFSET + LEN) of the document, clamping
     LEN to what remains past OFFSET.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
6991
9accd112
MM
6992#ifdef HAVE_LINUX_BTRACE
6993
79597bdd
TBA
/* See to_enable_btrace target method; delegates to the shared Linux
   btrace layer.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}
7000
969c39fb 7001/* See to_disable_btrace target method. */
9accd112 7002
79597bdd
TBA
7003int
7004linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
7005{
7006 enum btrace_error err;
7007
7008 err = linux_disable_btrace (tinfo);
7009 return (err == BTRACE_ERR_NONE ? 0 : -1);
7010}
7011
bc504a31 7012/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7013
7014static void
7015linux_low_encode_pt_config (struct buffer *buffer,
7016 const struct btrace_data_pt_config *config)
7017{
7018 buffer_grow_str (buffer, "<pt-config>\n");
7019
7020 switch (config->cpu.vendor)
7021 {
7022 case CV_INTEL:
7023 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7024 "model=\"%u\" stepping=\"%u\"/>\n",
7025 config->cpu.family, config->cpu.model,
7026 config->cpu.stepping);
7027 break;
7028
7029 default:
7030 break;
7031 }
7032
7033 buffer_grow_str (buffer, "</pt-config>\n");
7034}
7035
7036/* Encode a raw buffer. */
7037
7038static void
7039linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7040 unsigned int size)
7041{
7042 if (size == 0)
7043 return;
7044
268a13a5 7045 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
7046 buffer_grow_str (buffer, "<raw>\n");
7047
7048 while (size-- > 0)
7049 {
7050 char elem[2];
7051
7052 elem[0] = tohex ((*data >> 4) & 0xf);
7053 elem[1] = tohex (*data++ & 0xf);
7054
7055 buffer_grow (buffer, elem, 2);
7056 }
7057
7058 buffer_grow_str (buffer, "</raw>\n");
7059}
7060
969c39fb
MM
/* See to_read_btrace target method.  Fills BUFFER with the XML reply
   (or an "E.*" error string) and returns 0 on success, -1 on error.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Map the btrace error onto the RSP "E.<message>" convention.  */
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Encode the trace data in the format-specific XML schema.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      /* One <block> element per branch trace block.  */
      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      /* The PT payload is raw hex-encoded bytes.  */
      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
f4abbc16
MM
7120
7121/* See to_btrace_conf target method. */
7122
79597bdd
TBA
7123int
7124linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7125 buffer *buffer)
f4abbc16
MM
7126{
7127 const struct btrace_config *conf;
7128
7129 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7130 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7131
7132 conf = linux_btrace_conf (tinfo);
7133 if (conf != NULL)
7134 {
7135 switch (conf->format)
7136 {
7137 case BTRACE_FORMAT_NONE:
7138 break;
7139
7140 case BTRACE_FORMAT_BTS:
d33501a5
MM
7141 buffer_xml_printf (buffer, "<bts");
7142 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7143 buffer_xml_printf (buffer, " />\n");
f4abbc16 7144 break;
b20a6524
MM
7145
7146 case BTRACE_FORMAT_PT:
7147 buffer_xml_printf (buffer, "<pt");
7148 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7149 buffer_xml_printf (buffer, "/>\n");
7150 break;
f4abbc16
MM
7151 }
7152 }
7153
7154 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7155 return 0;
7156}
9accd112
MM
7157#endif /* HAVE_LINUX_BTRACE */
7158
7b669087
GB
/* See nat/linux-nat.h.  Returns the ptid of the current thread.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
7166
7f63b89b
TBA
const char *
linux_process_target::thread_name (ptid_t thread)
{
  /* The thread name comes from /proc via the linux-procfs layer.  */
  return linux_proc_tid_get_name (thread);
}
7172
#if USE_THREAD_DB
/* Fetch the libthread_db handle for PTID; only available when built
   with thread_db support.  */
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7181
276d4552
YQ
7182/* Default implementation of linux_target_ops method "set_pc" for
7183 32-bit pc register which is literally named "pc". */
7184
7185void
7186linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7187{
7188 uint32_t newpc = pc;
7189
7190 supply_register_by_name (regcache, "pc", &newpc);
7191}
7192
7193/* Default implementation of linux_target_ops method "get_pc" for
7194 32-bit pc register which is literally named "pc". */
7195
7196CORE_ADDR
7197linux_get_pc_32bit (struct regcache *regcache)
7198{
7199 uint32_t pc;
7200
7201 collect_register_by_name (regcache, "pc", &pc);
7202 if (debug_threads)
7203 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7204 return pc;
7205}
7206
6f69e520
YQ
7207/* Default implementation of linux_target_ops method "set_pc" for
7208 64-bit pc register which is literally named "pc". */
7209
7210void
7211linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7212{
7213 uint64_t newpc = pc;
7214
7215 supply_register_by_name (regcache, "pc", &newpc);
7216}
7217
7218/* Default implementation of linux_target_ops method "get_pc" for
7219 64-bit pc register which is literally named "pc". */
7220
7221CORE_ADDR
7222linux_get_pc_64bit (struct regcache *regcache)
7223{
7224 uint64_t pc;
7225
7226 collect_register_by_name (regcache, "pc", &pc);
7227 if (debug_threads)
7228 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7229 return pc;
7230}
7231
0570503d 7232/* See linux-low.h. */
974c89e0 7233
0570503d
PFC
7234int
7235linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7236{
7237 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7238 int offset = 0;
7239
7240 gdb_assert (wordsize == 4 || wordsize == 8);
7241
52405d85 7242 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7243 {
7244 if (wordsize == 4)
7245 {
0570503d 7246 uint32_t *data_p = (uint32_t *) data;
974c89e0 7247 if (data_p[0] == match)
0570503d
PFC
7248 {
7249 *valp = data_p[1];
7250 return 1;
7251 }
974c89e0
AH
7252 }
7253 else
7254 {
0570503d 7255 uint64_t *data_p = (uint64_t *) data;
974c89e0 7256 if (data_p[0] == match)
0570503d
PFC
7257 {
7258 *valp = data_p[1];
7259 return 1;
7260 }
974c89e0
AH
7261 }
7262
7263 offset += 2 * wordsize;
7264 }
7265
7266 return 0;
7267}
7268
7269/* See linux-low.h. */
7270
7271CORE_ADDR
7272linux_get_hwcap (int wordsize)
7273{
0570503d
PFC
7274 CORE_ADDR hwcap = 0;
7275 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7276 return hwcap;
974c89e0
AH
7277}
7278
7279/* See linux-low.h. */
7280
7281CORE_ADDR
7282linux_get_hwcap2 (int wordsize)
7283{
0570503d
PFC
7284 CORE_ADDR hwcap2 = 0;
7285 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7286 return hwcap2;
974c89e0 7287}
6f69e520 7288
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets table, which is terminated by an
   entry with a negative size, and record the count in num_regsets.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7299
da6d8c04
DJ
/* One-time initialization of the Linux low target: install the target
   vector, set up the SIGCHLD handler, and probe ptrace features.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART keeps interrupted syscalls restarting transparently
     when a child status change delivers SIGCHLD.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}
This page took 2.09293 seconds and 4 git commands to generate.