gdbserver/linux-low: turn 'get_pc' and 'set_pc' into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-low.cc
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
b811d2c2 2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
268a13a5 22#include "gdbsupport/agent.h"
de0d863e 23#include "tdesc.h"
268a13a5
TT
24#include "gdbsupport/rsp-low.h"
25#include "gdbsupport/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
268a13a5 28#include "gdbsupport/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
268a13a5 46#include "gdbsupport/filestuff.h"
c144c7a0 47#include "tracepoint.h"
276d4552 48#include <inttypes.h>
268a13a5 49#include "gdbsupport/common-inferior.h"
2090129c 50#include "nat/fork-inferior.h"
268a13a5 51#include "gdbsupport/environ.h"
21987b9c 52#include "gdbsupport/gdb-sigmask.h"
268a13a5 53#include "gdbsupport/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14 62
03583c20
UW
63#ifdef HAVE_PERSONALITY
64# include <sys/personality.h>
65# if !HAVE_DECL_ADDR_NO_RANDOMIZE
66# define ADDR_NO_RANDOMIZE 0x0040000
67# endif
68#endif
69
fd462a61
DJ
70#ifndef O_LARGEFILE
71#define O_LARGEFILE 0
72#endif
1a981360 73
69f4c9cc
AH
74#ifndef AT_HWCAP2
75#define AT_HWCAP2 26
76#endif
77
db0dfaa0
LM
78/* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81#if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84#if defined(__mcoldfire__)
85/* These are still undefined in 3.10 kernels. */
86#define PT_TEXT_ADDR 49*4
87#define PT_DATA_ADDR 50*4
88#define PT_TEXT_END_ADDR 51*4
89/* BFIN already defines these since at least 2.6.32 kernels. */
90#elif defined(BFIN)
91#define PT_TEXT_ADDR 220
92#define PT_TEXT_END_ADDR 224
93#define PT_DATA_ADDR 228
94/* These are still undefined in 3.10 kernels. */
95#elif defined(__TMS320C6X__)
96#define PT_TEXT_ADDR (0x10000*4)
97#define PT_DATA_ADDR (0x10004*4)
98#define PT_TEXT_END_ADDR (0x10008*4)
99#endif
100#endif
101
5203ae1e
TBA
102#if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107#define SUPPORTS_READ_OFFSETS
108#endif
109
9accd112 110#ifdef HAVE_LINUX_BTRACE
125f8a3d 111# include "nat/linux-btrace.h"
268a13a5 112# include "gdbsupport/btrace-common.h"
9accd112
MM
113#endif
114
8365dcf5
TJB
115#ifndef HAVE_ELF32_AUXV_T
116/* Copied from glibc's elf.h. */
117typedef struct
118{
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We use to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127} Elf32_auxv_t;
128#endif
129
130#ifndef HAVE_ELF64_AUXV_T
131/* Copied from glibc's elf.h. */
132typedef struct
133{
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We use to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142} Elf64_auxv_t;
143#endif
144
ded48a5e
YQ
145/* Does the current host support PTRACE_GETREGSET? */
146int have_ptrace_getregset = -1;
147
cff068da
GB
148/* LWP accessors. */
149
150/* See nat/linux-nat.h. */
151
152ptid_t
153ptid_of_lwp (struct lwp_info *lwp)
154{
155 return ptid_of (get_lwp_thread (lwp));
156}
157
158/* See nat/linux-nat.h. */
159
4b134ca1
GB
160void
161lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163{
164 lwp->arch_private = info;
165}
166
167/* See nat/linux-nat.h. */
168
169struct arch_lwp_info *
170lwp_arch_private_info (struct lwp_info *lwp)
171{
172 return lwp->arch_private;
173}
174
175/* See nat/linux-nat.h. */
176
cff068da
GB
177int
178lwp_is_stopped (struct lwp_info *lwp)
179{
180 return lwp->stopped;
181}
182
183/* See nat/linux-nat.h. */
184
185enum target_stop_reason
186lwp_stop_reason (struct lwp_info *lwp)
187{
188 return lwp->stop_reason;
189}
190
0e00e962
AA
191/* See nat/linux-nat.h. */
192
193int
194lwp_is_stepping (struct lwp_info *lwp)
195{
196 return lwp->stepping;
197}
198
05044653
PA
199/* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
24a09b5f 202
05044653
PA
/* One entry per as-yet-unclaimed process that reported a stop; some
   other process will presumably claim each as a forked child
   momentarily.  Singly linked.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The stop status, exactly as reported by waitpid.  */
  int status;

  /* Next entry in the chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;
215
216/* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219static void
220add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221{
8d749320 222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228}
229
230static int
231pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232{
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246}
24a09b5f 247
bde24c0a
PA
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* Set to the appropriate kind while stop_all_lwps is in effect;
   NOT_STOPPING_THREADS otherwise.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
262
263/* FIXME make into a target method? */
24a09b5f 264int using_threads = 1;
24a09b5f 265
fa593d66
PA
266/* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268static int stabilizing_threads;
269
f50bf8e5 270static void unsuspend_all_lwps (struct lwp_info *except);
b3312d80 271static struct lwp_info *add_lwp (ptid_t ptid);
95954743 272static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 273static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 274static int finish_step_over (struct lwp_info *lwp);
d50171e4 275static int kill_lwp (unsigned long lwpid, int signo);
863d01bd 276static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
ece66d65 277static int linux_low_ptrace_options (int attached);
ced2dffb 278static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
d50171e4 279
582511be
PA
280/* When the event-loop is doing a step-over, this points at the thread
281 being stepped. */
282ptid_t step_over_bkpt;
283
7d00775e 284/* True if the low target can hardware single-step. */
d50171e4
PA
285
286static int
287can_hardware_single_step (void)
288{
7d00775e
AT
289 if (the_low_target.supports_hardware_single_step != NULL)
290 return the_low_target.supports_hardware_single_step ();
291 else
292 return 0;
293}
294
295/* True if the low target can software single-step. Such targets
fa5308bd 296 implement the GET_NEXT_PCS callback. */
7d00775e
AT
297
298static int
299can_software_single_step (void)
300{
fa5308bd 301 return (the_low_target.get_next_pcs != NULL);
d50171e4
PA
302}
303
bf9ae9d8
TBA
304bool
305linux_process_target::low_supports_breakpoints ()
306{
307 return false;
308}
d50171e4 309
bf9ae9d8
TBA
310CORE_ADDR
311linux_process_target::low_get_pc (regcache *regcache)
312{
313 return 0;
314}
315
316void
317linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
d50171e4 318{
bf9ae9d8 319 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
d50171e4 320}
0d62e5e8 321
fa593d66
PA
322/* Returns true if this target can support fast tracepoints. This
323 does not mean that the in-process agent has been loaded in the
324 inferior. */
325
326static int
327supports_fast_tracepoints (void)
328{
329 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
330}
331
c2d6af84
PA
332/* True if LWP is stopped in its stepping range. */
333
334static int
335lwp_in_step_range (struct lwp_info *lwp)
336{
337 CORE_ADDR pc = lwp->stop_pc;
338
339 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
340}
341
0d62e5e8
DJ
/* A signal queued for later delivery to an LWP, together with the
   siginfo captured when it was intercepted.  Linked via PREV.  */

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};
611cb4a5 348
bd99dc85
PA
349/* The read/write ends of the pipe registered as waitable file in the
350 event loop. */
351static int linux_event_pipe[2] = { -1, -1 };
352
353/* True if we're currently in async mode. */
354#define target_is_async_p() (linux_event_pipe[0] != -1)
355
02fc4de7 356static void send_sigstop (struct lwp_info *lwp);
bd99dc85 357
d0722149
DE
358/* Return non-zero if HEADER is a 64-bit ELF file. */
359
static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  /* Validate the ELF magic bytes first.  */
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      /* 1 for ELFCLASS64, 0 otherwise.  */
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  /* Not an ELF header at all.  NOTE(review): -1 is itself non-zero,
     so a caller testing the result only for truth would conflate this
     with "is 64-bit"; callers are expected to check for -1
     explicitly.  */
  *machine = EM_NONE;
  return -1;
}
375
376/* Return non-zero if FILE is a 64-bit ELF file,
377 zero if the file is not a 64-bit ELF file,
378 and -1 if the file is not accessible or doesn't exist. */
379
be07f1a2 380static int
214d508e 381elf_64_file_p (const char *file, unsigned int *machine)
d0722149 382{
957f3f49 383 Elf64_Ehdr header;
d0722149
DE
384 int fd;
385
386 fd = open (file, O_RDONLY);
387 if (fd < 0)
388 return -1;
389
390 if (read (fd, &header, sizeof (header)) != sizeof (header))
391 {
392 close (fd);
393 return 0;
394 }
395 close (fd);
396
214d508e 397 return elf_64_header_p (&header, machine);
d0722149
DE
398}
399
be07f1a2
PA
400/* Accepts an integer PID; Returns true if the executable PID is
401 running is a 64-bit ELF file.. */
402
403int
214d508e 404linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 405{
d8d2a3ee 406 char file[PATH_MAX];
be07f1a2
PA
407
408 sprintf (file, "/proc/%d/exe", pid);
214d508e 409 return elf_64_file_p (file, machine);
be07f1a2
PA
410}
411
bd99dc85
PA
412static void
413delete_lwp (struct lwp_info *lwp)
414{
fa96cb38
PA
415 struct thread_info *thr = get_lwp_thread (lwp);
416
417 if (debug_threads)
418 debug_printf ("deleting %ld\n", lwpid_of (thr));
419
420 remove_thread (thr);
466eecee
SM
421
422 if (the_low_target.delete_thread != NULL)
423 the_low_target.delete_thread (lwp->arch_private);
424 else
425 gdb_assert (lwp->arch_private == NULL);
426
bd99dc85
PA
427 free (lwp);
428}
429
95954743
PA
430/* Add a process to the common process list, and set its private
431 data. */
432
433static struct process_info *
434linux_add_process (int pid, int attached)
435{
436 struct process_info *proc;
437
95954743 438 proc = add_process (pid, attached);
8d749320 439 proc->priv = XCNEW (struct process_info_private);
95954743 440
aa5ca48f 441 if (the_low_target.new_process != NULL)
fe978cb0 442 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 443
95954743
PA
444 return proc;
445}
446
797bcff5
TBA
447void
448linux_process_target::arch_setup_thread (thread_info *thread)
94585166
DB
449{
450 struct thread_info *saved_thread;
451
452 saved_thread = current_thread;
453 current_thread = thread;
454
797bcff5 455 low_arch_setup ();
94585166
DB
456
457 current_thread = saved_thread;
458}
459
d16f3f6c
TBA
460int
461linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
462 int wstat)
24a09b5f 463{
c12a5089 464 client_state &cs = get_client_state ();
94585166 465 struct lwp_info *event_lwp = *orig_event_lwp;
89a5711c 466 int event = linux_ptrace_get_extended_event (wstat);
de0d863e 467 struct thread_info *event_thr = get_lwp_thread (event_lwp);
54a0b537 468 struct lwp_info *new_lwp;
24a09b5f 469
65706a29
PA
470 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
471
82075af2
JS
472 /* All extended events we currently use are mid-syscall. Only
473 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
474 you have to be using PTRACE_SEIZE to get that. */
475 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
476
c269dbdb
DB
477 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
478 || (event == PTRACE_EVENT_CLONE))
24a09b5f 479 {
95954743 480 ptid_t ptid;
24a09b5f 481 unsigned long new_pid;
05044653 482 int ret, status;
24a09b5f 483
de0d863e 484 /* Get the pid of the new lwp. */
d86d4aaf 485 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
56f7af9c 486 &new_pid);
24a09b5f
DJ
487
488 /* If we haven't already seen the new PID stop, wait for it now. */
05044653 489 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
24a09b5f
DJ
490 {
491 /* The new child has a pending SIGSTOP. We can't affect it until it
492 hits the SIGSTOP, but we're already attached. */
493
97438e3f 494 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
495
496 if (ret == -1)
497 perror_with_name ("waiting for new child");
498 else if (ret != new_pid)
499 warning ("wait returned unexpected PID %d", ret);
da5898ce 500 else if (!WIFSTOPPED (status))
24a09b5f
DJ
501 warning ("wait returned unexpected status 0x%x", status);
502 }
503
c269dbdb 504 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
de0d863e
DB
505 {
506 struct process_info *parent_proc;
507 struct process_info *child_proc;
508 struct lwp_info *child_lwp;
bfacd19d 509 struct thread_info *child_thr;
de0d863e
DB
510 struct target_desc *tdesc;
511
fd79271b 512 ptid = ptid_t (new_pid, new_pid, 0);
de0d863e
DB
513
514 if (debug_threads)
515 {
516 debug_printf ("HEW: Got fork event from LWP %ld, "
517 "new child is %d\n",
e38504b3 518 ptid_of (event_thr).lwp (),
e99b03dc 519 ptid.pid ());
de0d863e
DB
520 }
521
522 /* Add the new process to the tables and clone the breakpoint
523 lists of the parent. We need to do this even if the new process
524 will be detached, since we will need the process object and the
525 breakpoints to remove any breakpoints from memory when we
526 detach, and the client side will access registers. */
527 child_proc = linux_add_process (new_pid, 0);
528 gdb_assert (child_proc != NULL);
529 child_lwp = add_lwp (ptid);
530 gdb_assert (child_lwp != NULL);
531 child_lwp->stopped = 1;
bfacd19d
DB
532 child_lwp->must_set_ptrace_flags = 1;
533 child_lwp->status_pending_p = 0;
534 child_thr = get_lwp_thread (child_lwp);
535 child_thr->last_resume_kind = resume_stop;
998d452a
PA
536 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
537
863d01bd 538 /* If we're suspending all threads, leave this one suspended
0f8288ae
YQ
539 too. If the fork/clone parent is stepping over a breakpoint,
540 all other threads have been suspended already. Leave the
541 child suspended too. */
542 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
543 || event_lwp->bp_reinsert != 0)
863d01bd
PA
544 {
545 if (debug_threads)
546 debug_printf ("HEW: leaving child suspended\n");
547 child_lwp->suspended = 1;
548 }
549
de0d863e
DB
550 parent_proc = get_thread_process (event_thr);
551 child_proc->attached = parent_proc->attached;
2e7b624b
YQ
552
553 if (event_lwp->bp_reinsert != 0
554 && can_software_single_step ()
555 && event == PTRACE_EVENT_VFORK)
556 {
3b9a79ef
YQ
557 /* If we leave single-step breakpoints there, child will
558 hit it, so uninsert single-step breakpoints from parent
2e7b624b
YQ
559 (and child). Once vfork child is done, reinsert
560 them back to parent. */
3b9a79ef 561 uninsert_single_step_breakpoints (event_thr);
2e7b624b
YQ
562 }
563
63c40ec7 564 clone_all_breakpoints (child_thr, event_thr);
de0d863e 565
cc397f3a 566 tdesc = allocate_target_description ();
de0d863e
DB
567 copy_target_description (tdesc, parent_proc->tdesc);
568 child_proc->tdesc = tdesc;
de0d863e 569
3a8a0396
DB
570 /* Clone arch-specific process data. */
571 if (the_low_target.new_fork != NULL)
572 the_low_target.new_fork (parent_proc, child_proc);
573
de0d863e 574 /* Save fork info in the parent thread. */
c269dbdb
DB
575 if (event == PTRACE_EVENT_FORK)
576 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
577 else if (event == PTRACE_EVENT_VFORK)
578 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
579
de0d863e 580 event_lwp->waitstatus.value.related_pid = ptid;
c269dbdb 581
de0d863e
DB
582 /* The status_pending field contains bits denoting the
583 extended event, so when the pending event is handled,
584 the handler will look at lwp->waitstatus. */
585 event_lwp->status_pending_p = 1;
586 event_lwp->status_pending = wstat;
587
5a04c4cf
PA
588 /* Link the threads until the parent event is passed on to
589 higher layers. */
590 event_lwp->fork_relative = child_lwp;
591 child_lwp->fork_relative = event_lwp;
592
3b9a79ef
YQ
593 /* If the parent thread is doing step-over with single-step
594 breakpoints, the list of single-step breakpoints are cloned
2e7b624b
YQ
595 from the parent's. Remove them from the child process.
596 In case of vfork, we'll reinsert them back once vforked
597 child is done. */
8a81c5d7 598 if (event_lwp->bp_reinsert != 0
2e7b624b 599 && can_software_single_step ())
8a81c5d7 600 {
8a81c5d7
YQ
601 /* The child process is forked and stopped, so it is safe
602 to access its memory without stopping all other threads
603 from other processes. */
3b9a79ef 604 delete_single_step_breakpoints (child_thr);
8a81c5d7 605
3b9a79ef
YQ
606 gdb_assert (has_single_step_breakpoints (event_thr));
607 gdb_assert (!has_single_step_breakpoints (child_thr));
8a81c5d7
YQ
608 }
609
de0d863e
DB
610 /* Report the event. */
611 return 0;
612 }
613
fa96cb38
PA
614 if (debug_threads)
615 debug_printf ("HEW: Got clone event "
616 "from LWP %ld, new child is LWP %ld\n",
617 lwpid_of (event_thr), new_pid);
618
fd79271b 619 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
b3312d80 620 new_lwp = add_lwp (ptid);
24a09b5f 621
e27d73f6 622 /* Either we're going to immediately resume the new thread
df95181f 623 or leave it stopped. resume_one_lwp is a nop if it
e27d73f6 624 thinks the thread is currently running, so set this first
df95181f 625 before calling resume_one_lwp. */
e27d73f6
DE
626 new_lwp->stopped = 1;
627
0f8288ae
YQ
628 /* If we're suspending all threads, leave this one suspended
629 too. If the fork/clone parent is stepping over a breakpoint,
630 all other threads have been suspended already. Leave the
631 child suspended too. */
632 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
633 || event_lwp->bp_reinsert != 0)
bde24c0a
PA
634 new_lwp->suspended = 1;
635
da5898ce
DJ
636 /* Normally we will get the pending SIGSTOP. But in some cases
637 we might get another signal delivered to the group first.
f21cc1a2 638 If we do get another signal, be sure not to lose it. */
20ba1ce6 639 if (WSTOPSIG (status) != SIGSTOP)
da5898ce 640 {
54a0b537 641 new_lwp->stop_expected = 1;
20ba1ce6
PA
642 new_lwp->status_pending_p = 1;
643 new_lwp->status_pending = status;
da5898ce 644 }
c12a5089 645 else if (cs.report_thread_events)
65706a29
PA
646 {
647 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
648 new_lwp->status_pending_p = 1;
649 new_lwp->status_pending = status;
650 }
de0d863e 651
a0aad537 652#ifdef USE_THREAD_DB
94c207e0 653 thread_db_notice_clone (event_thr, ptid);
a0aad537 654#endif
86299109 655
de0d863e
DB
656 /* Don't report the event. */
657 return 1;
24a09b5f 658 }
c269dbdb
DB
659 else if (event == PTRACE_EVENT_VFORK_DONE)
660 {
661 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
662
2e7b624b
YQ
663 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
664 {
3b9a79ef 665 reinsert_single_step_breakpoints (event_thr);
2e7b624b 666
3b9a79ef 667 gdb_assert (has_single_step_breakpoints (event_thr));
2e7b624b
YQ
668 }
669
c269dbdb
DB
670 /* Report the event. */
671 return 0;
672 }
c12a5089 673 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
94585166
DB
674 {
675 struct process_info *proc;
f27866ba 676 std::vector<int> syscalls_to_catch;
94585166
DB
677 ptid_t event_ptid;
678 pid_t event_pid;
679
680 if (debug_threads)
681 {
682 debug_printf ("HEW: Got exec event from LWP %ld\n",
683 lwpid_of (event_thr));
684 }
685
686 /* Get the event ptid. */
687 event_ptid = ptid_of (event_thr);
e99b03dc 688 event_pid = event_ptid.pid ();
94585166 689
82075af2 690 /* Save the syscall list from the execing process. */
94585166 691 proc = get_thread_process (event_thr);
f27866ba 692 syscalls_to_catch = std::move (proc->syscalls_to_catch);
82075af2
JS
693
694 /* Delete the execing process and all its threads. */
d16f3f6c 695 mourn (proc);
94585166
DB
696 current_thread = NULL;
697
698 /* Create a new process/lwp/thread. */
699 proc = linux_add_process (event_pid, 0);
700 event_lwp = add_lwp (event_ptid);
701 event_thr = get_lwp_thread (event_lwp);
702 gdb_assert (current_thread == event_thr);
797bcff5 703 arch_setup_thread (event_thr);
94585166
DB
704
705 /* Set the event status. */
706 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
707 event_lwp->waitstatus.value.execd_pathname
708 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
709
710 /* Mark the exec status as pending. */
711 event_lwp->stopped = 1;
712 event_lwp->status_pending_p = 1;
713 event_lwp->status_pending = wstat;
714 event_thr->last_resume_kind = resume_continue;
715 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
716
82075af2
JS
717 /* Update syscall state in the new lwp, effectively mid-syscall too. */
718 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
719
720 /* Restore the list to catch. Don't rely on the client, which is free
721 to avoid sending a new list when the architecture doesn't change.
722 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
f27866ba 723 proc->syscalls_to_catch = std::move (syscalls_to_catch);
82075af2 724
94585166
DB
725 /* Report the event. */
726 *orig_event_lwp = event_lwp;
727 return 0;
728 }
de0d863e
DB
729
730 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
24a09b5f
DJ
731}
732
df95181f
TBA
733CORE_ADDR
734linux_process_target::get_pc (lwp_info *lwp)
d50171e4 735{
0bfdf32f 736 struct thread_info *saved_thread;
d50171e4
PA
737 struct regcache *regcache;
738 CORE_ADDR pc;
739
bf9ae9d8 740 if (!low_supports_breakpoints ())
d50171e4
PA
741 return 0;
742
0bfdf32f
GB
743 saved_thread = current_thread;
744 current_thread = get_lwp_thread (lwp);
d50171e4 745
0bfdf32f 746 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 747 pc = low_get_pc (regcache);
d50171e4
PA
748
749 if (debug_threads)
87ce2a04 750 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 751
0bfdf32f 752 current_thread = saved_thread;
d50171e4
PA
753 return pc;
754}
755
82075af2 756/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
4cc32bec 757 Fill *SYSNO with the syscall nr trapped. */
82075af2
JS
758
759static void
4cc32bec 760get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
82075af2
JS
761{
762 struct thread_info *saved_thread;
763 struct regcache *regcache;
764
765 if (the_low_target.get_syscall_trapinfo == NULL)
766 {
767 /* If we cannot get the syscall trapinfo, report an unknown
4cc32bec 768 system call number. */
82075af2 769 *sysno = UNKNOWN_SYSCALL;
82075af2
JS
770 return;
771 }
772
773 saved_thread = current_thread;
774 current_thread = get_lwp_thread (lwp);
775
776 regcache = get_thread_regcache (current_thread, 1);
4cc32bec 777 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
82075af2
JS
778
779 if (debug_threads)
4cc32bec 780 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
82075af2
JS
781
782 current_thread = saved_thread;
783}
784
e7ad2f14 785static int check_stopped_by_watchpoint (struct lwp_info *child);
0d62e5e8 786
df95181f
TBA
787bool
788linux_process_target::save_stop_reason (lwp_info *lwp)
0d62e5e8 789{
582511be
PA
790 CORE_ADDR pc;
791 CORE_ADDR sw_breakpoint_pc;
792 struct thread_info *saved_thread;
3e572f71
PA
793#if USE_SIGTRAP_SIGINFO
794 siginfo_t siginfo;
795#endif
d50171e4 796
bf9ae9d8 797 if (!low_supports_breakpoints ())
df95181f 798 return false;
0d62e5e8 799
582511be
PA
800 pc = get_pc (lwp);
801 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
d50171e4 802
582511be
PA
803 /* breakpoint_at reads from the current thread. */
804 saved_thread = current_thread;
805 current_thread = get_lwp_thread (lwp);
47c0c975 806
3e572f71
PA
807#if USE_SIGTRAP_SIGINFO
808 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
809 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
810 {
811 if (siginfo.si_signo == SIGTRAP)
812 {
e7ad2f14
PA
813 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
814 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 815 {
e7ad2f14
PA
816 /* The si_code is ambiguous on this arch -- check debug
817 registers. */
818 if (!check_stopped_by_watchpoint (lwp))
819 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
820 }
821 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
822 {
823 /* If we determine the LWP stopped for a SW breakpoint,
824 trust it. Particularly don't check watchpoint
825 registers, because at least on s390, we'd find
826 stopped-by-watchpoint as long as there's a watchpoint
827 set. */
3e572f71 828 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
3e572f71 829 }
e7ad2f14 830 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
3e572f71 831 {
e7ad2f14
PA
832 /* This can indicate either a hardware breakpoint or
833 hardware watchpoint. Check debug registers. */
834 if (!check_stopped_by_watchpoint (lwp))
835 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
3e572f71 836 }
2bf6fb9d
PA
837 else if (siginfo.si_code == TRAP_TRACE)
838 {
e7ad2f14
PA
839 /* We may have single stepped an instruction that
840 triggered a watchpoint. In that case, on some
841 architectures (such as x86), instead of TRAP_HWBKPT,
842 si_code indicates TRAP_TRACE, and we need to check
843 the debug registers separately. */
844 if (!check_stopped_by_watchpoint (lwp))
845 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
2bf6fb9d 846 }
3e572f71
PA
847 }
848 }
849#else
582511be
PA
850 /* We may have just stepped a breakpoint instruction. E.g., in
851 non-stop mode, GDB first tells the thread A to step a range, and
852 then the user inserts a breakpoint inside the range. In that
8090aef2
PA
853 case we need to report the breakpoint PC. */
854 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
582511be 855 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
e7ad2f14
PA
856 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
857
858 if (hardware_breakpoint_inserted_here (pc))
859 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
860
861 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
862 check_stopped_by_watchpoint (lwp);
863#endif
864
865 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
582511be
PA
866 {
867 if (debug_threads)
868 {
869 struct thread_info *thr = get_lwp_thread (lwp);
870
871 debug_printf ("CSBB: %s stopped by software breakpoint\n",
872 target_pid_to_str (ptid_of (thr)));
873 }
874
875 /* Back up the PC if necessary. */
876 if (pc != sw_breakpoint_pc)
e7ad2f14 877 {
582511be
PA
878 struct regcache *regcache
879 = get_thread_regcache (current_thread, 1);
bf9ae9d8 880 low_set_pc (regcache, sw_breakpoint_pc);
582511be
PA
881 }
882
e7ad2f14
PA
883 /* Update this so we record the correct stop PC below. */
884 pc = sw_breakpoint_pc;
582511be 885 }
e7ad2f14 886 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
582511be
PA
887 {
888 if (debug_threads)
889 {
890 struct thread_info *thr = get_lwp_thread (lwp);
891
892 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
893 target_pid_to_str (ptid_of (thr)));
894 }
e7ad2f14
PA
895 }
896 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
897 {
898 if (debug_threads)
899 {
900 struct thread_info *thr = get_lwp_thread (lwp);
47c0c975 901
e7ad2f14
PA
902 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
903 target_pid_to_str (ptid_of (thr)));
904 }
582511be 905 }
e7ad2f14
PA
906 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
907 {
908 if (debug_threads)
909 {
910 struct thread_info *thr = get_lwp_thread (lwp);
582511be 911
e7ad2f14
PA
912 debug_printf ("CSBB: %s stopped by trace\n",
913 target_pid_to_str (ptid_of (thr)));
914 }
915 }
916
917 lwp->stop_pc = pc;
582511be 918 current_thread = saved_thread;
df95181f 919 return true;
0d62e5e8 920}
ce3a066d 921
b3312d80 922static struct lwp_info *
95954743 923add_lwp (ptid_t ptid)
611cb4a5 924{
54a0b537 925 struct lwp_info *lwp;
0d62e5e8 926
8d749320 927 lwp = XCNEW (struct lwp_info);
00db26fa
PA
928
929 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
0d62e5e8 930
754e3168
AH
931 lwp->thread = add_thread (ptid, lwp);
932
aa5ca48f 933 if (the_low_target.new_thread != NULL)
34c703da 934 the_low_target.new_thread (lwp);
aa5ca48f 935
54a0b537 936 return lwp;
0d62e5e8 937}
611cb4a5 938
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, before the exec.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      /* fd 0 was just closed, so this open reuses it as stdin.  */
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
970
da6d8c04 971/* Start an inferior process and returns its pid.
2090129c
SDJ
972 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
973 are its arguments. */
da6d8c04 974
15295543
TBA
975int
976linux_process_target::create_inferior (const char *program,
977 const std::vector<char *> &program_args)
da6d8c04 978{
c12a5089 979 client_state &cs = get_client_state ();
a6dbe5df 980 struct lwp_info *new_lwp;
da6d8c04 981 int pid;
95954743 982 ptid_t ptid;
03583c20 983
41272101
TT
984 {
985 maybe_disable_address_space_randomization restore_personality
c12a5089 986 (cs.disable_randomization);
41272101
TT
987 std::string str_program_args = stringify_argv (program_args);
988
989 pid = fork_inferior (program,
990 str_program_args.c_str (),
991 get_environ ()->envp (), linux_ptrace_fun,
992 NULL, NULL, NULL, NULL);
993 }
03583c20 994
55d7b841 995 linux_add_process (pid, 0);
95954743 996
fd79271b 997 ptid = ptid_t (pid, pid, 0);
95954743 998 new_lwp = add_lwp (ptid);
a6dbe5df 999 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 1000
2090129c
SDJ
1001 post_fork_inferior (pid, program);
1002
a9fa9f7d 1003 return pid;
da6d8c04
DJ
1004}
1005
ece66d65
JS
1006/* Implement the post_create_inferior target_ops method. */
1007
6dee9afb
TBA
1008void
1009linux_process_target::post_create_inferior ()
ece66d65
JS
1010{
1011 struct lwp_info *lwp = get_thread_lwp (current_thread);
1012
797bcff5 1013 low_arch_setup ();
ece66d65
JS
1014
1015 if (lwp->must_set_ptrace_flags)
1016 {
1017 struct process_info *proc = current_process ();
1018 int options = linux_low_ptrace_options (proc->attached);
1019
1020 linux_enable_event_reporting (lwpid_of (current_thread), options);
1021 lwp->must_set_ptrace_flags = 0;
1022 }
1023}
1024
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  On success, a new lwp_info is registered and its SIGSTOP
   is flagged as expected.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1109
8784d563
PA
1110/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1111 already attached. Returns true if a new LWP is found, false
1112 otherwise. */
1113
1114static int
1115attach_proc_task_lwp_callback (ptid_t ptid)
1116{
1117 /* Is this a new thread? */
1118 if (find_thread_ptid (ptid) == NULL)
1119 {
e38504b3 1120 int lwpid = ptid.lwp ();
8784d563
PA
1121 int err;
1122
1123 if (debug_threads)
1124 debug_printf ("Found new lwp %d\n", lwpid);
1125
1126 err = linux_attach_lwp (ptid);
1127
1128 /* Be quiet if we simply raced with the thread exiting. EPERM
1129 is returned if the thread's task still exists, and is marked
1130 as exited or zombie, as well as other conditions, so in that
1131 case, confirm the status in /proc/PID/status. */
1132 if (err == ESRCH
1133 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1134 {
1135 if (debug_threads)
1136 {
1137 debug_printf ("Cannot attach to lwp %d: "
1138 "thread is gone (%d: %s)\n",
6d91ce9a 1139 lwpid, err, safe_strerror (err));
8784d563
PA
1140 }
1141 }
1142 else if (err != 0)
1143 {
4d9b86e1 1144 std::string reason
50fa3001 1145 = linux_ptrace_attach_fail_reason_string (ptid, err);
4d9b86e1
SM
1146
1147 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
8784d563
PA
1148 }
1149
1150 return 1;
1151 }
1152 return 0;
1153}
1154
500c1d85
PA
1155static void async_file_mark (void);
1156
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  /* Second argument 1: we attached to this process rather than
     spawning it.  */
  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration before reporting the failure.  */
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      /* Wait for the first stop of any LWP in the thread group.  */
      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* If the stop was not the expected initial SIGSTOP, keep the
	 status pending so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1229
95954743 1230static int
e4eb0dec 1231last_thread_of_process_p (int pid)
95954743 1232{
e4eb0dec 1233 bool seen_one = false;
95954743 1234
da4ae14a 1235 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1236 {
e4eb0dec
SM
1237 if (!seen_one)
1238 {
1239 /* This is the first thread of this process we see. */
1240 seen_one = true;
1241 return false;
1242 }
1243 else
1244 {
1245 /* This is the second thread of this process we see. */
1246 return true;
1247 }
1248 });
da6d8c04 1249
e4eb0dec 1250 return thread == NULL;
95954743
PA
1251}
1252
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  /* Clear errno first so the debug output below can tell whether the
     kill succeeded.  */
  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
1296
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      /* ECHILD from the plain waitpid means LWPID is a clone child;
	 retry with __WCLONE to collect it.  */
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1339
578290ec 1340/* Callback for `for_each_thread'. Kills an lwp of a given process,
da84f473 1341 except the leader. */
95954743 1342
578290ec
SM
1343static void
1344kill_one_lwp_callback (thread_info *thread, int pid)
da6d8c04 1345{
54a0b537 1346 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 1347
fd500816
DJ
1348 /* We avoid killing the first thread here, because of a Linux kernel (at
1349 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1350 the children get a chance to be reaped, it will remain a zombie
1351 forever. */
95954743 1352
d86d4aaf 1353 if (lwpid_of (thread) == pid)
95954743
PA
1354 {
1355 if (debug_threads)
87ce2a04 1356 debug_printf ("lkop: is last of process %s\n",
9c80ecd6 1357 target_pid_to_str (thread->id));
578290ec 1358 return;
95954743 1359 }
fd500816 1360
e76126e8 1361 kill_wait_lwp (lwp);
da6d8c04
DJ
1362}
1363
/* Kill the whole process PROCESS: every LWP, leader last, then mourn
   it.  Always returns 0.  */

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1398
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when no signal should be delivered, otherwise the host signal
   number to pass along with PTRACE_DETACH.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's QPassSignals/QProgramSignals settings: suppress
     signals the client asked not to pass to the program.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1479
/* Detach from LWP.  Sends any pending SIGCONT/signal along with the
   PTRACE_DETACH, and deletes the lwp_info.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1564
798a38e8 1565/* Callback for for_each_thread. Detaches from non-leader threads of a
ced2dffb
PA
1566 given process. */
1567
798a38e8
SM
1568static void
1569linux_detach_lwp_callback (thread_info *thread)
ced2dffb 1570{
ced2dffb
PA
1571 /* We don't actually detach from the thread group leader just yet.
1572 If the thread group exits, we must reap the zombie clone lwps
1573 before we're able to reap the leader. */
798a38e8
SM
1574 if (thread->id.pid () == thread->id.lwp ())
1575 return;
ced2dffb 1576
798a38e8 1577 lwp_info *lwp = get_thread_lwp (thread);
ced2dffb 1578 linux_detach_one_lwp (lwp);
6ad8ae5c
DJ
1579}
1580
/* Detach from every LWP of PROCESS (leader last) and mourn the
   process.  Always returns 0.  */

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1619
1620/* Remove all LWPs that belong to process PROC from the lwp list. */
1621
8adb37b9
TBA
1622void
1623linux_process_target::mourn (process_info *process)
8336d594
PA
1624{
1625 struct process_info_private *priv;
1626
1627#ifdef USE_THREAD_DB
1628 thread_db_mourn (process);
1629#endif
1630
6b2a85da
SM
1631 for_each_thread (process->pid, [] (thread_info *thread)
1632 {
1633 delete_lwp (get_thread_lwp (thread));
1634 });
f9e39928 1635
8336d594 1636 /* Freeing all private data. */
fe978cb0 1637 priv = process->priv;
04ec7890
SM
1638 if (the_low_target.delete_process != NULL)
1639 the_low_target.delete_process (priv->arch_private);
1640 else
1641 gdb_assert (priv->arch_private == NULL);
8336d594 1642 free (priv);
fe978cb0 1643 process->priv = NULL;
505106cd
PA
1644
1645 remove_process (process);
8336d594
PA
1646}
1647
95a49a39
TBA
1648void
1649linux_process_target::join (int pid)
444d6139 1650{
444d6139
PA
1651 int status, ret;
1652
1653 do {
d105de22 1654 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1655 if (WIFEXITED (status) || WIFSIGNALED (status))
1656 break;
1657 } while (ret != -1 || errno != ECHILD);
1658}
1659
13d3d99b
TBA
1660/* Return true if the given thread is still alive. */
1661
1662bool
1663linux_process_target::thread_alive (ptid_t ptid)
0d62e5e8 1664{
95954743
PA
1665 struct lwp_info *lwp = find_lwp_pid (ptid);
1666
1667 /* We assume we always know if a thread exits. If a whole process
1668 exited but we still haven't been able to report it to GDB, we'll
1669 hold on to the last lwp of the dead process. */
1670 if (lwp != NULL)
00db26fa 1671 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1672 else
1673 return 0;
1674}
1675
/* Return true if THREAD's pending status is still worth reporting to
   the client.  A pending SW/HW breakpoint stop is discarded (and the
   flag cleared) if the thread's PC moved away from the stop PC, or
   (without SIGTRAP siginfo support) if the breakpoint is gone.  */

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* Temporarily switch threads; restored before returning.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1739
a681f9c9
PA
1740/* Returns true if LWP is resumed from the client's perspective. */
1741
1742static int
1743lwp_resumed (struct lwp_info *lwp)
1744{
1745 struct thread_info *thread = get_lwp_thread (lwp);
1746
1747 if (thread->last_resume_kind != resume_stop)
1748 return 1;
1749
1750 /* Did gdb send us a `vCont;t', but we haven't reported the
1751 corresponding stop to gdb yet? If so, the thread is still
1752 resumed/running from gdb's perspective. */
1753 if (thread->last_resume_kind == resume_stop
1754 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1755 return 1;
1756
1757 return 0;
1758}
1759
df95181f
TBA
1760bool
1761linux_process_target::status_pending_p_callback (thread_info *thread,
1762 ptid_t ptid)
0d62e5e8 1763{
582511be 1764 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1765
1766 /* Check if we're only interested in events from a specific process
afa8d396 1767 or a specific LWP. */
83e1b6c1 1768 if (!thread->id.matches (ptid))
95954743 1769 return 0;
0d62e5e8 1770
a681f9c9
PA
1771 if (!lwp_resumed (lp))
1772 return 0;
1773
582511be 1774 if (lp->status_pending_p
df95181f 1775 && !thread_still_has_status_pending (thread))
582511be 1776 {
df95181f 1777 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
582511be
PA
1778 return 0;
1779 }
0d62e5e8 1780
582511be 1781 return lp->status_pending_p;
0d62e5e8
DJ
1782}
1783
95954743
PA
1784struct lwp_info *
1785find_lwp_pid (ptid_t ptid)
1786{
da4ae14a 1787 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1788 {
1789 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1790 return thr_arg->id.lwp () == lwp;
454296a2 1791 });
d86d4aaf
DE
1792
1793 if (thread == NULL)
1794 return NULL;
1795
9c80ecd6 1796 return get_thread_lwp (thread);
95954743
PA
1797}
1798
fa96cb38 1799/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1800
fa96cb38
PA
1801static int
1802num_lwps (int pid)
1803{
fa96cb38 1804 int count = 0;
0d62e5e8 1805
4d3bb80e
SM
1806 for_each_thread (pid, [&] (thread_info *thread)
1807 {
9c80ecd6 1808 count++;
4d3bb80e 1809 });
3aee8918 1810
fa96cb38
PA
1811 return count;
1812}
d61ddec4 1813
6d4ee8c6
GB
1814/* See nat/linux-nat.h. */
1815
1816struct lwp_info *
1817iterate_over_lwps (ptid_t filter,
d3a70e03 1818 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1819{
da4ae14a 1820 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1821 {
da4ae14a 1822 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1823
d3a70e03 1824 return callback (lwp);
6d1e5673 1825 });
6d4ee8c6 1826
9c80ecd6 1827 if (thread == NULL)
6d4ee8c6
GB
1828 return NULL;
1829
9c80ecd6 1830 return get_thread_lwp (thread);
6d4ee8c6
GB
1831}
1832
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    /* Only consider a leader we still track, that isn't stopped under
       our control, while other threads of the group remain.  */
    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	   available, or only the leader exited (not the whole
	   program).  In the latter case, we can't waitpid the
	   leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	   other than the leader exec'd.  On an exec, the Linux
	   kernel destroys all other threads (except the execing
	   one) in the thread group, and resets the execing thread's
	   tid to the tgid.  No exit notification is sent for the
	   execing thread -- from the ptracer's perspective, it
	   appears as though the execing thread just vanishes.
	   Until we reap all other threads except the leader and the
	   execing thread, the leader will be zombie, and the
	   execing thread will be in `D (disc sleep)'.  As soon as
	   all other threads are reaped, the execing thread changes
	   it's tid to the tgid, and the previous (zombie) leader
	   vanishes, giving place to the "new" leader.  We could try
	   distinguishing the exit and exec cases, by waiting once
	   more, and seeing if something comes out, but it doesn't
	   sound useful.  The previous leader _does_ go away, and
	   we'll re-add the new one once we see the exec event
	   (which is just the same as what would happen if the
	   previous leader did exit voluntarily before some other
	   thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}
c3adc08c 1894
a1385b7b
SM
1895/* Callback for `find_thread'. Returns the first LWP that is not
1896 stopped. */
d50171e4 1897
a1385b7b
SM
1898static bool
1899not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1900{
a1385b7b
SM
1901 if (!thread->id.matches (filter))
1902 return false;
47c0c975 1903
a1385b7b 1904 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1905
a1385b7b 1906 return !lwp->stopped;
0d62e5e8 1907}
611cb4a5 1908
863d01bd
PA
1909/* Increment LWP's suspend count. */
1910
1911static void
1912lwp_suspended_inc (struct lwp_info *lwp)
1913{
1914 lwp->suspended++;
1915
1916 if (debug_threads && lwp->suspended > 4)
1917 {
1918 struct thread_info *thread = get_lwp_thread (lwp);
1919
1920 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1921 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1922 }
1923}
1924
1925/* Decrement LWP's suspend count. */
1926
1927static void
1928lwp_suspended_decr (struct lwp_info *lwp)
1929{
1930 lwp->suspended--;
1931
1932 if (lwp->suspended < 0)
1933 {
1934 struct thread_info *thread = get_lwp_thread (lwp);
1935
1936 internal_error (__FILE__, __LINE__,
1937 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1938 lwp->suspended);
1939 }
1940}
1941
219f2f23
PA
1942/* This function should only be called if the LWP got a SIGTRAP.
1943
1944 Handle any tracepoint steps or hits. Return true if a tracepoint
1945 event was handled, 0 otherwise. */
1946
1947static int
1948handle_tracepoints (struct lwp_info *lwp)
1949{
1950 struct thread_info *tinfo = get_lwp_thread (lwp);
1951 int tpoint_related_event = 0;
1952
582511be
PA
1953 gdb_assert (lwp->suspended == 0);
1954
7984d532
PA
1955 /* If this tracepoint hit causes a tracing stop, we'll immediately
1956 uninsert tracepoints. To do this, we temporarily pause all
1957 threads, unpatch away, and then unpause threads. We need to make
1958 sure the unpausing doesn't resume LWP too. */
863d01bd 1959 lwp_suspended_inc (lwp);
7984d532 1960
219f2f23
PA
1961 /* And we need to be sure that any all-threads-stopping doesn't try
1962 to move threads out of the jump pads, as it could deadlock the
1963 inferior (LWP could be in the jump pad, maybe even holding the
1964 lock.) */
1965
1966 /* Do any necessary step collect actions. */
1967 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1968
fa593d66
PA
1969 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1970
219f2f23
PA
1971 /* See if we just hit a tracepoint and do its main collect
1972 actions. */
1973 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1974
863d01bd 1975 lwp_suspended_decr (lwp);
7984d532
PA
1976
1977 gdb_assert (lwp->suspended == 0);
229d26fc
SM
1978 gdb_assert (!stabilizing_threads
1979 || (lwp->collecting_fast_tracepoint
1980 != fast_tpoint_collect_result::not_collecting));
7984d532 1981
219f2f23
PA
1982 if (tpoint_related_event)
1983 {
1984 if (debug_threads)
87ce2a04 1985 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1986 return 1;
1987 }
1988
1989 return 0;
1990}
1991
229d26fc
SM
1992/* Convenience wrapper. Returns information about LWP's fast tracepoint
1993 collection status. */
fa593d66 1994
229d26fc 1995static fast_tpoint_collect_result
fa593d66
PA
1996linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1997 struct fast_tpoint_collect_status *status)
1998{
1999 CORE_ADDR thread_area;
d86d4aaf 2000 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2001
2002 if (the_low_target.get_thread_area == NULL)
229d26fc 2003 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2004
2005 /* Get the thread area address. This is used to recognize which
2006 thread is which when tracing with the in-process agent library.
2007 We don't read anything from the address, and treat it as opaque;
2008 it's the address itself that we assume is unique per-thread. */
d86d4aaf 2009 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
229d26fc 2010 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2011
2012 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2013}
2014
d16f3f6c
TBA
2015bool
2016linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
fa593d66 2017{
0bfdf32f 2018 struct thread_info *saved_thread;
fa593d66 2019
0bfdf32f
GB
2020 saved_thread = current_thread;
2021 current_thread = get_lwp_thread (lwp);
fa593d66
PA
2022
2023 if ((wstat == NULL
2024 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2025 && supports_fast_tracepoints ()
58b4daa5 2026 && agent_loaded_p ())
fa593d66
PA
2027 {
2028 struct fast_tpoint_collect_status status;
fa593d66
PA
2029
2030 if (debug_threads)
87ce2a04
DE
2031 debug_printf ("Checking whether LWP %ld needs to move out of the "
2032 "jump pad.\n",
0bfdf32f 2033 lwpid_of (current_thread));
fa593d66 2034
229d26fc
SM
2035 fast_tpoint_collect_result r
2036 = linux_fast_tracepoint_collecting (lwp, &status);
fa593d66
PA
2037
2038 if (wstat == NULL
2039 || (WSTOPSIG (*wstat) != SIGILL
2040 && WSTOPSIG (*wstat) != SIGFPE
2041 && WSTOPSIG (*wstat) != SIGSEGV
2042 && WSTOPSIG (*wstat) != SIGBUS))
2043 {
2044 lwp->collecting_fast_tracepoint = r;
2045
229d26fc 2046 if (r != fast_tpoint_collect_result::not_collecting)
fa593d66 2047 {
229d26fc
SM
2048 if (r == fast_tpoint_collect_result::before_insn
2049 && lwp->exit_jump_pad_bkpt == NULL)
fa593d66
PA
2050 {
2051 /* Haven't executed the original instruction yet.
2052 Set breakpoint there, and wait till it's hit,
2053 then single-step until exiting the jump pad. */
2054 lwp->exit_jump_pad_bkpt
2055 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2056 }
2057
2058 if (debug_threads)
87ce2a04
DE
2059 debug_printf ("Checking whether LWP %ld needs to move out of "
2060 "the jump pad...it does\n",
0bfdf32f
GB
2061 lwpid_of (current_thread));
2062 current_thread = saved_thread;
fa593d66 2063
d16f3f6c 2064 return true;
fa593d66
PA
2065 }
2066 }
2067 else
2068 {
2069 /* If we get a synchronous signal while collecting, *and*
2070 while executing the (relocated) original instruction,
2071 reset the PC to point at the tpoint address, before
2072 reporting to GDB. Otherwise, it's an IPA lib bug: just
2073 report the signal to GDB, and pray for the best. */
2074
229d26fc
SM
2075 lwp->collecting_fast_tracepoint
2076 = fast_tpoint_collect_result::not_collecting;
fa593d66 2077
229d26fc 2078 if (r != fast_tpoint_collect_result::not_collecting
fa593d66
PA
2079 && (status.adjusted_insn_addr <= lwp->stop_pc
2080 && lwp->stop_pc < status.adjusted_insn_addr_end))
2081 {
2082 siginfo_t info;
2083 struct regcache *regcache;
2084
2085 /* The si_addr on a few signals references the address
2086 of the faulting instruction. Adjust that as
2087 well. */
2088 if ((WSTOPSIG (*wstat) == SIGILL
2089 || WSTOPSIG (*wstat) == SIGFPE
2090 || WSTOPSIG (*wstat) == SIGBUS
2091 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 2092 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2093 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
2094 /* Final check just to make sure we don't clobber
2095 the siginfo of non-kernel-sent signals. */
2096 && (uintptr_t) info.si_addr == lwp->stop_pc)
2097 {
2098 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 2099 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 2100 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
2101 }
2102
0bfdf32f 2103 regcache = get_thread_regcache (current_thread, 1);
bf9ae9d8 2104 low_set_pc (regcache, status.tpoint_addr);
fa593d66
PA
2105 lwp->stop_pc = status.tpoint_addr;
2106
2107 /* Cancel any fast tracepoint lock this thread was
2108 holding. */
2109 force_unlock_trace_buffer ();
2110 }
2111
2112 if (lwp->exit_jump_pad_bkpt != NULL)
2113 {
2114 if (debug_threads)
87ce2a04
DE
2115 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2116 "stopping all threads momentarily.\n");
fa593d66
PA
2117
2118 stop_all_lwps (1, lwp);
fa593d66
PA
2119
2120 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2121 lwp->exit_jump_pad_bkpt = NULL;
2122
2123 unstop_all_lwps (1, lwp);
2124
2125 gdb_assert (lwp->suspended >= 0);
2126 }
2127 }
2128 }
2129
2130 if (debug_threads)
87ce2a04
DE
2131 debug_printf ("Checking whether LWP %ld needs to move out of the "
2132 "jump pad...no\n",
0bfdf32f 2133 lwpid_of (current_thread));
0cccb683 2134
0bfdf32f 2135 current_thread = saved_thread;
d16f3f6c 2136 return false;
fa593d66
PA
2137}
2138
2139/* Enqueue one signal in the "signals to report later when out of the
2140 jump pad" list. */
2141
2142static void
2143enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2144{
2145 struct pending_signals *p_sig;
d86d4aaf 2146 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2147
2148 if (debug_threads)
87ce2a04 2149 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 2150 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2151
2152 if (debug_threads)
2153 {
2154 struct pending_signals *sig;
2155
2156 for (sig = lwp->pending_signals_to_report;
2157 sig != NULL;
2158 sig = sig->prev)
87ce2a04
DE
2159 debug_printf (" Already queued %d\n",
2160 sig->signal);
fa593d66 2161
87ce2a04 2162 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
2163 }
2164
1a981360
PA
2165 /* Don't enqueue non-RT signals if they are already in the deferred
2166 queue. (SIGSTOP being the easiest signal to see ending up here
2167 twice) */
2168 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2169 {
2170 struct pending_signals *sig;
2171
2172 for (sig = lwp->pending_signals_to_report;
2173 sig != NULL;
2174 sig = sig->prev)
2175 {
2176 if (sig->signal == WSTOPSIG (*wstat))
2177 {
2178 if (debug_threads)
87ce2a04
DE
2179 debug_printf ("Not requeuing already queued non-RT signal %d"
2180 " for LWP %ld\n",
2181 sig->signal,
d86d4aaf 2182 lwpid_of (thread));
1a981360
PA
2183 return;
2184 }
2185 }
2186 }
2187
8d749320 2188 p_sig = XCNEW (struct pending_signals);
fa593d66
PA
2189 p_sig->prev = lwp->pending_signals_to_report;
2190 p_sig->signal = WSTOPSIG (*wstat);
8d749320 2191
d86d4aaf 2192 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2193 &p_sig->info);
fa593d66
PA
2194
2195 lwp->pending_signals_to_report = p_sig;
2196}
2197
2198/* Dequeue one signal from the "signals to report later when out of
2199 the jump pad" list. */
2200
2201static int
2202dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2203{
d86d4aaf
DE
2204 struct thread_info *thread = get_lwp_thread (lwp);
2205
fa593d66
PA
2206 if (lwp->pending_signals_to_report != NULL)
2207 {
2208 struct pending_signals **p_sig;
2209
2210 p_sig = &lwp->pending_signals_to_report;
2211 while ((*p_sig)->prev != NULL)
2212 p_sig = &(*p_sig)->prev;
2213
2214 *wstat = W_STOPCODE ((*p_sig)->signal);
2215 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 2216 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 2217 &(*p_sig)->info);
fa593d66
PA
2218 free (*p_sig);
2219 *p_sig = NULL;
2220
2221 if (debug_threads)
87ce2a04 2222 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 2223 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
2224
2225 if (debug_threads)
2226 {
2227 struct pending_signals *sig;
2228
2229 for (sig = lwp->pending_signals_to_report;
2230 sig != NULL;
2231 sig = sig->prev)
87ce2a04
DE
2232 debug_printf (" Still queued %d\n",
2233 sig->signal);
fa593d66 2234
87ce2a04 2235 debug_printf (" (no more queued signals)\n");
fa593d66
PA
2236 }
2237
2238 return 1;
2239 }
2240
2241 return 0;
2242}
2243
582511be
PA
2244/* Fetch the possibly triggered data watchpoint info and store it in
2245 CHILD.
d50171e4 2246
582511be
PA
2247 On some archs, like x86, that use debug registers to set
2248 watchpoints, it's possible that the way to know which watched
2249 address trapped, is to check the register that is used to select
2250 which address to watch. Problem is, between setting the watchpoint
2251 and reading back which data address trapped, the user may change
2252 the set of watchpoints, and, as a consequence, GDB changes the
2253 debug registers in the inferior. To avoid reading back a stale
2254 stopped-data-address when that happens, we cache in LP the fact
2255 that a watchpoint trapped, and the corresponding data address, as
2256 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2257 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2258
582511be
PA
2259static int
2260check_stopped_by_watchpoint (struct lwp_info *child)
2261{
2262 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2263 {
582511be 2264 struct thread_info *saved_thread;
d50171e4 2265
582511be
PA
2266 saved_thread = current_thread;
2267 current_thread = get_lwp_thread (child);
2268
2269 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2270 {
15c66dd6 2271 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2272
2273 if (the_low_target.stopped_data_address != NULL)
2274 child->stopped_data_address
2275 = the_low_target.stopped_data_address ();
2276 else
2277 child->stopped_data_address = 0;
d50171e4
PA
2278 }
2279
0bfdf32f 2280 current_thread = saved_thread;
d50171e4
PA
2281 }
2282
15c66dd6 2283 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2284}
2285
de0d863e
DB
2286/* Return the ptrace options that we want to try to enable. */
2287
2288static int
2289linux_low_ptrace_options (int attached)
2290{
c12a5089 2291 client_state &cs = get_client_state ();
de0d863e
DB
2292 int options = 0;
2293
2294 if (!attached)
2295 options |= PTRACE_O_EXITKILL;
2296
c12a5089 2297 if (cs.report_fork_events)
de0d863e
DB
2298 options |= PTRACE_O_TRACEFORK;
2299
c12a5089 2300 if (cs.report_vfork_events)
c269dbdb
DB
2301 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2302
c12a5089 2303 if (cs.report_exec_events)
94585166
DB
2304 options |= PTRACE_O_TRACEEXEC;
2305
82075af2
JS
2306 options |= PTRACE_O_TRACESYSGOOD;
2307
de0d863e
DB
2308 return options;
2309}
2310
d16f3f6c
TBA
2311lwp_info *
2312linux_process_target::filter_event (int lwpid, int wstat)
fa96cb38 2313{
c12a5089 2314 client_state &cs = get_client_state ();
fa96cb38
PA
2315 struct lwp_info *child;
2316 struct thread_info *thread;
582511be 2317 int have_stop_pc = 0;
fa96cb38 2318
f2907e49 2319 child = find_lwp_pid (ptid_t (lwpid));
fa96cb38 2320
94585166
DB
2321 /* Check for stop events reported by a process we didn't already
2322 know about - anything not already in our LWP list.
2323
2324 If we're expecting to receive stopped processes after
2325 fork, vfork, and clone events, then we'll just add the
2326 new one to our list and go back to waiting for the event
2327 to be reported - the stopped process might be returned
2328 from waitpid before or after the event is.
2329
2330 But note the case of a non-leader thread exec'ing after the
2331 leader having exited, and gone from our lists (because
2332 check_zombie_leaders deleted it). The non-leader thread
2333 changes its tid to the tgid. */
2334
2335 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2336 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2337 {
2338 ptid_t child_ptid;
2339
2340 /* A multi-thread exec after we had seen the leader exiting. */
2341 if (debug_threads)
2342 {
2343 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2344 "after exec.\n", lwpid);
2345 }
2346
fd79271b 2347 child_ptid = ptid_t (lwpid, lwpid, 0);
94585166
DB
2348 child = add_lwp (child_ptid);
2349 child->stopped = 1;
2350 current_thread = child->thread;
2351 }
2352
fa96cb38
PA
2353 /* If we didn't find a process, one of two things presumably happened:
2354 - A process we started and then detached from has exited. Ignore it.
2355 - A process we are controlling has forked and the new child's stop
2356 was reported to us by the kernel. Save its PID. */
2357 if (child == NULL && WIFSTOPPED (wstat))
2358 {
2359 add_to_pid_list (&stopped_pids, lwpid, wstat);
2360 return NULL;
2361 }
2362 else if (child == NULL)
2363 return NULL;
2364
2365 thread = get_lwp_thread (child);
2366
2367 child->stopped = 1;
2368
2369 child->last_status = wstat;
2370
582511be
PA
2371 /* Check if the thread has exited. */
2372 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2373 {
2374 if (debug_threads)
2375 debug_printf ("LLFE: %d exited.\n", lwpid);
f50bf8e5
YQ
2376
2377 if (finish_step_over (child))
2378 {
2379 /* Unsuspend all other LWPs, and set them back running again. */
2380 unsuspend_all_lwps (child);
2381 }
2382
65706a29
PA
2383 /* If there is at least one more LWP, then the exit signal was
2384 not the end of the debugged application and should be
2385 ignored, unless GDB wants to hear about thread exits. */
c12a5089 2386 if (cs.report_thread_events
65706a29 2387 || last_thread_of_process_p (pid_of (thread)))
582511be 2388 {
65706a29
PA
2389 /* Since events are serialized to GDB core, and we can't
2390 report this one right now. Leave the status pending for
2391 the next time we're able to report it. */
2392 mark_lwp_dead (child, wstat);
2393 return child;
582511be
PA
2394 }
2395 else
2396 {
65706a29
PA
2397 delete_lwp (child);
2398 return NULL;
582511be
PA
2399 }
2400 }
2401
2402 gdb_assert (WIFSTOPPED (wstat));
2403
fa96cb38
PA
2404 if (WIFSTOPPED (wstat))
2405 {
2406 struct process_info *proc;
2407
c06cbd92 2408 /* Architecture-specific setup after inferior is running. */
fa96cb38 2409 proc = find_process_pid (pid_of (thread));
c06cbd92 2410 if (proc->tdesc == NULL)
fa96cb38 2411 {
c06cbd92
YQ
2412 if (proc->attached)
2413 {
c06cbd92
YQ
2414 /* This needs to happen after we have attached to the
2415 inferior and it is stopped for the first time, but
2416 before we access any inferior registers. */
797bcff5 2417 arch_setup_thread (thread);
c06cbd92
YQ
2418 }
2419 else
2420 {
2421 /* The process is started, but GDBserver will do
2422 architecture-specific setup after the program stops at
2423 the first instruction. */
2424 child->status_pending_p = 1;
2425 child->status_pending = wstat;
2426 return child;
2427 }
fa96cb38
PA
2428 }
2429 }
2430
fa96cb38
PA
2431 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2432 {
beed38b8 2433 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2434 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2435
de0d863e 2436 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2437 child->must_set_ptrace_flags = 0;
2438 }
2439
82075af2
JS
2440 /* Always update syscall_state, even if it will be filtered later. */
2441 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2442 {
2443 child->syscall_state
2444 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2445 ? TARGET_WAITKIND_SYSCALL_RETURN
2446 : TARGET_WAITKIND_SYSCALL_ENTRY);
2447 }
2448 else
2449 {
2450 /* Almost all other ptrace-stops are known to be outside of system
2451 calls, with further exceptions in handle_extended_wait. */
2452 child->syscall_state = TARGET_WAITKIND_IGNORE;
2453 }
2454
e7ad2f14
PA
2455 /* Be careful to not overwrite stop_pc until save_stop_reason is
2456 called. */
fa96cb38 2457 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2458 && linux_is_extended_waitstatus (wstat))
fa96cb38 2459 {
582511be 2460 child->stop_pc = get_pc (child);
94585166 2461 if (handle_extended_wait (&child, wstat))
de0d863e
DB
2462 {
2463 /* The event has been handled, so just return without
2464 reporting it. */
2465 return NULL;
2466 }
fa96cb38
PA
2467 }
2468
80aea927 2469 if (linux_wstatus_maybe_breakpoint (wstat))
582511be 2470 {
e7ad2f14 2471 if (save_stop_reason (child))
582511be
PA
2472 have_stop_pc = 1;
2473 }
2474
2475 if (!have_stop_pc)
2476 child->stop_pc = get_pc (child);
2477
fa96cb38
PA
2478 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2479 && child->stop_expected)
2480 {
2481 if (debug_threads)
2482 debug_printf ("Expected stop.\n");
2483 child->stop_expected = 0;
2484
2485 if (thread->last_resume_kind == resume_stop)
2486 {
2487 /* We want to report the stop to the core. Treat the
2488 SIGSTOP as a normal event. */
2bf6fb9d
PA
2489 if (debug_threads)
2490 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2491 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2492 }
2493 else if (stopping_threads != NOT_STOPPING_THREADS)
2494 {
2495 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2496 pending. */
2bf6fb9d
PA
2497 if (debug_threads)
2498 debug_printf ("LLW: SIGSTOP caught for %s "
2499 "while stopping threads.\n",
2500 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2501 return NULL;
2502 }
2503 else
2504 {
2bf6fb9d
PA
2505 /* This is a delayed SIGSTOP. Filter out the event. */
2506 if (debug_threads)
2507 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2508 child->stepping ? "step" : "continue",
2509 target_pid_to_str (ptid_of (thread)));
2510
df95181f 2511 resume_one_lwp (child, child->stepping, 0, NULL);
fa96cb38
PA
2512 return NULL;
2513 }
2514 }
2515
582511be
PA
2516 child->status_pending_p = 1;
2517 child->status_pending = wstat;
fa96cb38
PA
2518 return child;
2519}
2520
f79b145d
YQ
2521/* Return true if THREAD is doing hardware single step. */
2522
2523static int
2524maybe_hw_step (struct thread_info *thread)
2525{
2526 if (can_hardware_single_step ())
2527 return 1;
2528 else
2529 {
3b9a79ef 2530 /* GDBserver must insert single-step breakpoint for software
f79b145d 2531 single step. */
3b9a79ef 2532 gdb_assert (has_single_step_breakpoints (thread));
f79b145d
YQ
2533 return 0;
2534 }
2535}
2536
df95181f
TBA
2537void
2538linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
20ba1ce6 2539{
20ba1ce6
PA
2540 struct lwp_info *lp = get_thread_lwp (thread);
2541
2542 if (lp->stopped
863d01bd 2543 && !lp->suspended
20ba1ce6 2544 && !lp->status_pending_p
20ba1ce6
PA
2545 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2546 {
8901d193
YQ
2547 int step = 0;
2548
2549 if (thread->last_resume_kind == resume_step)
2550 step = maybe_hw_step (thread);
20ba1ce6
PA
2551
2552 if (debug_threads)
2553 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2554 target_pid_to_str (ptid_of (thread)),
2555 paddress (lp->stop_pc),
2556 step);
2557
df95181f 2558 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
20ba1ce6
PA
2559 }
2560}
2561
d16f3f6c
TBA
2562int
2563linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2564 ptid_t filter_ptid,
2565 int *wstatp, int options)
0d62e5e8 2566{
d86d4aaf 2567 struct thread_info *event_thread;
d50171e4 2568 struct lwp_info *event_child, *requested_child;
fa96cb38 2569 sigset_t block_mask, prev_mask;
d50171e4 2570
fa96cb38 2571 retry:
d86d4aaf
DE
2572 /* N.B. event_thread points to the thread_info struct that contains
2573 event_child. Keep them in sync. */
2574 event_thread = NULL;
d50171e4
PA
2575 event_child = NULL;
2576 requested_child = NULL;
0d62e5e8 2577
95954743 2578 /* Check for a lwp with a pending status. */
bd99dc85 2579
d7e15655 2580 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
0d62e5e8 2581 {
83e1b6c1
SM
2582 event_thread = find_thread_in_random ([&] (thread_info *thread)
2583 {
2584 return status_pending_p_callback (thread, filter_ptid);
2585 });
2586
d86d4aaf
DE
2587 if (event_thread != NULL)
2588 event_child = get_thread_lwp (event_thread);
2589 if (debug_threads && event_thread)
2590 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2591 }
d7e15655 2592 else if (filter_ptid != null_ptid)
0d62e5e8 2593 {
fa96cb38 2594 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2595
bde24c0a 2596 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66 2597 && requested_child->status_pending_p
229d26fc
SM
2598 && (requested_child->collecting_fast_tracepoint
2599 != fast_tpoint_collect_result::not_collecting))
fa593d66
PA
2600 {
2601 enqueue_one_deferred_signal (requested_child,
2602 &requested_child->status_pending);
2603 requested_child->status_pending_p = 0;
2604 requested_child->status_pending = 0;
df95181f 2605 resume_one_lwp (requested_child, 0, 0, NULL);
fa593d66
PA
2606 }
2607
2608 if (requested_child->suspended
2609 && requested_child->status_pending_p)
38e08fca
GB
2610 {
2611 internal_error (__FILE__, __LINE__,
2612 "requesting an event out of a"
2613 " suspended child?");
2614 }
fa593d66 2615
d50171e4 2616 if (requested_child->status_pending_p)
d86d4aaf
DE
2617 {
2618 event_child = requested_child;
2619 event_thread = get_lwp_thread (event_child);
2620 }
0d62e5e8 2621 }
611cb4a5 2622
0d62e5e8
DJ
2623 if (event_child != NULL)
2624 {
bd99dc85 2625 if (debug_threads)
87ce2a04 2626 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2627 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2628 *wstatp = event_child->status_pending;
bd99dc85
PA
2629 event_child->status_pending_p = 0;
2630 event_child->status_pending = 0;
0bfdf32f 2631 current_thread = event_thread;
d86d4aaf 2632 return lwpid_of (event_thread);
0d62e5e8
DJ
2633 }
2634
fa96cb38
PA
2635 /* But if we don't find a pending event, we'll have to wait.
2636
2637 We only enter this loop if no process has a pending wait status.
2638 Thus any action taken in response to a wait status inside this
2639 loop is responding as soon as we detect the status, not after any
2640 pending events. */
d8301ad1 2641
fa96cb38
PA
2642 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2643 all signals while here. */
2644 sigfillset (&block_mask);
21987b9c 2645 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
fa96cb38 2646
582511be
PA
2647 /* Always pull all events out of the kernel. We'll randomly select
2648 an event LWP out of all that have events, to prevent
2649 starvation. */
fa96cb38 2650 while (event_child == NULL)
0d62e5e8 2651 {
fa96cb38 2652 pid_t ret = 0;
0d62e5e8 2653
fa96cb38
PA
2654 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2655 quirks:
0d62e5e8 2656
fa96cb38
PA
2657 - If the thread group leader exits while other threads in the
2658 thread group still exist, waitpid(TGID, ...) hangs. That
2659 waitpid won't return an exit status until the other threads
2660 in the group are reaped.
611cb4a5 2661
fa96cb38
PA
2662 - When a non-leader thread execs, that thread just vanishes
2663 without reporting an exit (so we'd hang if we waited for it
2664 explicitly in that case). The exec event is reported to
94585166 2665 the TGID pid. */
fa96cb38
PA
2666 errno = 0;
2667 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2668
fa96cb38
PA
2669 if (debug_threads)
2670 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
6d91ce9a 2671 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
0d62e5e8 2672
fa96cb38 2673 if (ret > 0)
0d62e5e8 2674 {
89be2091 2675 if (debug_threads)
bd99dc85 2676 {
fa96cb38
PA
2677 debug_printf ("LLW: waitpid %ld received %s\n",
2678 (long) ret, status_to_str (*wstatp));
bd99dc85 2679 }
89be2091 2680
582511be
PA
2681 /* Filter all events. IOW, leave all events pending. We'll
2682 randomly select an event LWP out of all that have events
2683 below. */
d16f3f6c 2684 filter_event (ret, *wstatp);
fa96cb38
PA
2685 /* Retry until nothing comes out of waitpid. A single
2686 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2687 continue;
2688 }
2689
20ba1ce6
PA
2690 /* Now that we've pulled all events out of the kernel, resume
2691 LWPs that don't have an interesting event to report. */
2692 if (stopping_threads == NOT_STOPPING_THREADS)
df95181f
TBA
2693 for_each_thread ([this] (thread_info *thread)
2694 {
2695 resume_stopped_resumed_lwps (thread);
2696 });
20ba1ce6
PA
2697
2698 /* ... and find an LWP with a status to report to the core, if
2699 any. */
83e1b6c1
SM
2700 event_thread = find_thread_in_random ([&] (thread_info *thread)
2701 {
2702 return status_pending_p_callback (thread, filter_ptid);
2703 });
2704
582511be
PA
2705 if (event_thread != NULL)
2706 {
2707 event_child = get_thread_lwp (event_thread);
2708 *wstatp = event_child->status_pending;
2709 event_child->status_pending_p = 0;
2710 event_child->status_pending = 0;
2711 break;
2712 }
2713
fa96cb38
PA
2714 /* Check for zombie thread group leaders. Those can't be reaped
2715 until all other threads in the thread group are. */
2716 check_zombie_leaders ();
2717
a1385b7b
SM
2718 auto not_stopped = [&] (thread_info *thread)
2719 {
2720 return not_stopped_callback (thread, wait_ptid);
2721 };
2722
fa96cb38
PA
2723 /* If there are no resumed children left in the set of LWPs we
2724 want to wait for, bail. We can't just block in
2725 waitpid/sigsuspend, because lwps might have been left stopped
2726 in trace-stop state, and we'd be stuck forever waiting for
2727 their status to change (which would only happen if we resumed
2728 them). Even if WNOHANG is set, this return code is preferred
2729 over 0 (below), as it is more detailed. */
a1385b7b 2730 if (find_thread (not_stopped) == NULL)
a6dbe5df 2731 {
fa96cb38
PA
2732 if (debug_threads)
2733 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
21987b9c 2734 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2735 return -1;
a6dbe5df
PA
2736 }
2737
fa96cb38
PA
2738 /* No interesting event to report to the caller. */
2739 if ((options & WNOHANG))
24a09b5f 2740 {
fa96cb38
PA
2741 if (debug_threads)
2742 debug_printf ("WNOHANG set, no event found\n");
2743
21987b9c 2744 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38 2745 return 0;
24a09b5f
DJ
2746 }
2747
fa96cb38
PA
2748 /* Block until we get an event reported with SIGCHLD. */
2749 if (debug_threads)
2750 debug_printf ("sigsuspend'ing\n");
d50171e4 2751
fa96cb38 2752 sigsuspend (&prev_mask);
21987b9c 2753 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
fa96cb38
PA
2754 goto retry;
2755 }
d50171e4 2756
21987b9c 2757 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2758
0bfdf32f 2759 current_thread = event_thread;
d50171e4 2760
fa96cb38
PA
2761 return lwpid_of (event_thread);
2762}
2763
d16f3f6c
TBA
2764int
2765linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
fa96cb38 2766{
d16f3f6c 2767 return wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2768}
2769
6bf5e0ba
PA
2770/* Select one LWP out of those that have events pending. */
2771
2772static void
2773select_event_lwp (struct lwp_info **orig_lp)
2774{
582511be
PA
2775 struct thread_info *event_thread = NULL;
2776
2777 /* In all-stop, give preference to the LWP that is being
2778 single-stepped. There will be at most one, and it's the LWP that
2779 the core is most interested in. If we didn't do this, then we'd
2780 have to handle pending step SIGTRAPs somehow in case the core
2781 later continues the previously-stepped thread, otherwise we'd
2782 report the pending SIGTRAP, and the core, not having stepped the
2783 thread, wouldn't understand what the trap was for, and therefore
2784 would report it to the user as a random signal. */
2785 if (!non_stop)
6bf5e0ba 2786 {
39a64da5
SM
2787 event_thread = find_thread ([] (thread_info *thread)
2788 {
2789 lwp_info *lp = get_thread_lwp (thread);
2790
2791 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2792 && thread->last_resume_kind == resume_step
2793 && lp->status_pending_p);
2794 });
2795
582511be
PA
2796 if (event_thread != NULL)
2797 {
2798 if (debug_threads)
2799 debug_printf ("SEL: Select single-step %s\n",
2800 target_pid_to_str (ptid_of (event_thread)));
2801 }
6bf5e0ba 2802 }
582511be 2803 if (event_thread == NULL)
6bf5e0ba
PA
2804 {
2805 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2806 which have had events. */
6bf5e0ba 2807
b0319eaa 2808 event_thread = find_thread_in_random ([&] (thread_info *thread)
39a64da5
SM
2809 {
2810 lwp_info *lp = get_thread_lwp (thread);
2811
b0319eaa
TT
2812 /* Only resumed LWPs that have an event pending. */
2813 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2814 && lp->status_pending_p);
39a64da5 2815 });
6bf5e0ba
PA
2816 }
2817
d86d4aaf 2818 if (event_thread != NULL)
6bf5e0ba 2819 {
d86d4aaf
DE
2820 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2821
6bf5e0ba
PA
2822 /* Switch the event LWP. */
2823 *orig_lp = event_lp;
2824 }
2825}
2826
7984d532
PA
2827/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2828 NULL. */
2829
2830static void
2831unsuspend_all_lwps (struct lwp_info *except)
2832{
139720c5
SM
2833 for_each_thread ([&] (thread_info *thread)
2834 {
2835 lwp_info *lwp = get_thread_lwp (thread);
2836
2837 if (lwp != except)
2838 lwp_suspended_decr (lwp);
2839 });
7984d532
PA
2840}
2841
fcb056a5 2842static bool stuck_in_jump_pad_callback (thread_info *thread);
5a6b0a41 2843static bool lwp_running (thread_info *thread);
fa593d66
PA
2844
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  /* If any thread is already wedged in a jump pad, we cannot
     stabilize; bail out early and leave things as they are.  */
  thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  thread_info *saved_thread = current_thread;

  /* Flag that wait_1 consults; while set, stops that occur inside
     jump pads are handled specially (deferred) instead of reported.  */
  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until the matching unsuspend_all_lwps below.  */
	  lwp_suspended_inc (lwp);

	  /* Re-queue any real signal (or a GDB-requested stop) as a
	     deferred signal so it is reported once stabilized.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Undo the per-stop lwp_suspended_inc done in the loop above.  */
  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck = find_thread (stuck_in_jump_pad_callback);

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
2941
582511be
PA
2942/* Convenience function that is called when the kernel reports an
2943 event that is not passed out to GDB. */
2944
2945static ptid_t
2946ignore_event (struct target_waitstatus *ourstatus)
2947{
2948 /* If we got an event, there may still be others, as a single
2949 SIGCHLD can indicate more than one child stopped. This forces
2950 another target_wait call. */
2951 async_file_mark ();
2952
2953 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2954 return null_ptid;
2955}
2956
65706a29
PA
2957/* Convenience function that is called when the kernel reports an exit
2958 event. This decides whether to report the event to GDB as a
2959 process exit event, a thread exit event, or to suppress the
2960 event. */
2961
2962static ptid_t
2963filter_exit_event (struct lwp_info *event_child,
2964 struct target_waitstatus *ourstatus)
2965{
c12a5089 2966 client_state &cs = get_client_state ();
65706a29
PA
2967 struct thread_info *thread = get_lwp_thread (event_child);
2968 ptid_t ptid = ptid_of (thread);
2969
2970 if (!last_thread_of_process_p (pid_of (thread)))
2971 {
c12a5089 2972 if (cs.report_thread_events)
65706a29
PA
2973 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2974 else
2975 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2976
2977 delete_lwp (event_child);
2978 }
2979 return ptid;
2980}
2981
82075af2
JS
2982/* Returns 1 if GDB is interested in any event_child syscalls. */
2983
2984static int
2985gdb_catching_syscalls_p (struct lwp_info *event_child)
2986{
2987 struct thread_info *thread = get_lwp_thread (event_child);
2988 struct process_info *proc = get_thread_process (thread);
2989
f27866ba 2990 return !proc->syscalls_to_catch.empty ();
82075af2
JS
2991}
2992
2993/* Returns 1 if GDB is interested in the event_child syscall.
2994 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
2995
2996static int
2997gdb_catch_this_syscall_p (struct lwp_info *event_child)
2998{
4cc32bec 2999 int sysno;
82075af2
JS
3000 struct thread_info *thread = get_lwp_thread (event_child);
3001 struct process_info *proc = get_thread_process (thread);
3002
f27866ba 3003 if (proc->syscalls_to_catch.empty ())
82075af2
JS
3004 return 0;
3005
f27866ba 3006 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
82075af2
JS
3007 return 1;
3008
4cc32bec 3009 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
3010
3011 for (int iter : proc->syscalls_to_catch)
82075af2
JS
3012 if (iter == sysno)
3013 return 1;
3014
3015 return 0;
3016}
3017
d16f3f6c
TBA
/* Core of the Linux target's wait implementation.  Waits for one
   event from the LWPs matching PTID (after step-over filtering),
   decides whether the event is internal (gdbserver breakpoints,
   tracepoint collection, step-over completion, ignored signals) or
   should be reported to GDB, and fills OURSTATUS accordingly.
   Returns the ptid of the reported thread, or null_ptid when nothing
   is reported (TARGET_WAITKIND_IGNORE / TARGET_WAITKIND_NO_RESUMED).
   TARGET_OPTIONS may include TARGET_WNOHANG for a polling wait.  */

ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      int target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  An LWP with a pending status, or one
     that is not stopped, counts as resumed.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  /* While a step-over is in progress, only wait for the stepping LWP,
     and do so blockingly, so the step-over completes first.  */
  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If step-over executes a breakpoint instruction, in the case of a
     hardware single step it means a gdb/gdbserver breakpoint had been
     planted on top of a permanent breakpoint, in the case of a software
     single step it may just mean that gdbserver hit the reinsert breakpoint.
     The PC has been adjusted by save_stop_reason to point at
     the breakpoint address.
     So in the case of the hardware single step advance the PC manually
     past the breakpoint and in the case of software single step advance only
     if it's not the single_step_breakpoint we are hitting.
     This avoids that a program would keep trapping a permanent breakpoint
     forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  /* If no breakpoint actually lives at the advanced PC, the
	     stop reason no longer applies.  */
	  if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it."
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      /* During stabilize_threads, a clean stop out of the
		 jump pad is reported as a plain SIG0 stop.  */
	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall_p (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Fetch the siginfo so it can be re-delivered on resume; if
	 that fails, resume without it.  */
      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
     do, we're be able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there isn't single-step breakpoint if we finished stepping
	     over.  */
	  if (can_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      if (debug_threads)
	debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (can_software_single_step ())
    {
      /* Remove single-step breakpoints or not.  If it is true, stop all
	 lwps, so that other threads won't hit the breakpoint in the
	 staled memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all lwps,
	     so that other threads won't hit the breakpoint in the staled
	     memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}


      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = the_low_target.decr_pc_after_break;

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but, it stopped for other reasons.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}
3750
3751/* Get rid of any pending event in the pipe. */
3752static void
3753async_file_flush (void)
3754{
3755 int ret;
3756 char buf;
3757
3758 do
3759 ret = read (linux_event_pipe[0], &buf, 1);
3760 while (ret >= 0 || (ret == -1 && errno == EINTR));
3761}
3762
3763/* Put something in the pipe, so the event loop wakes up. */
3764static void
3765async_file_mark (void)
3766{
3767 int ret;
3768
3769 async_file_flush ();
3770
3771 do
3772 ret = write (linux_event_pipe[1], "+", 1);
3773 while (ret == 0 || (ret == -1 && errno == EINTR));
3774
3775 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3776 be awakened anyway. */
3777}
3778
6532e7e3
TBA
3779ptid_t
3780linux_process_target::wait (ptid_t ptid,
3781 target_waitstatus *ourstatus,
3782 int target_options)
bd99dc85 3783{
95954743 3784 ptid_t event_ptid;
bd99dc85 3785
bd99dc85
PA
3786 /* Flush the async file first. */
3787 if (target_is_async_p ())
3788 async_file_flush ();
3789
582511be
PA
3790 do
3791 {
d16f3f6c 3792 event_ptid = wait_1 (ptid, ourstatus, target_options);
582511be
PA
3793 }
3794 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3795 && event_ptid == null_ptid
582511be 3796 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3797
3798 /* If at least one stop was reported, there may be more. A single
3799 SIGCHLD can signal more than one child stop. */
3800 if (target_is_async_p ()
3801 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3802 && event_ptid != null_ptid)
bd99dc85
PA
3803 async_file_mark ();
3804
3805 return event_ptid;
da6d8c04
DJ
3806}
3807
c5f62d5f 3808/* Send a signal to an LWP. */
fd500816
DJ
3809
3810static int
a1928bad 3811kill_lwp (unsigned long lwpid, int signo)
fd500816 3812{
4a6ed09b 3813 int ret;
fd500816 3814
4a6ed09b
PA
3815 errno = 0;
3816 ret = syscall (__NR_tkill, lwpid, signo);
3817 if (errno == ENOSYS)
3818 {
3819 /* If tkill fails, then we are not using nptl threads, a
3820 configuration we no longer support. */
3821 perror_with_name (("tkill"));
3822 }
3823 return ret;
fd500816
DJ
3824}
3825
964e4306
PA
3826void
3827linux_stop_lwp (struct lwp_info *lwp)
3828{
3829 send_sigstop (lwp);
3830}
3831
0d62e5e8 3832static void
02fc4de7 3833send_sigstop (struct lwp_info *lwp)
0d62e5e8 3834{
bd99dc85 3835 int pid;
0d62e5e8 3836
d86d4aaf 3837 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3838
0d62e5e8
DJ
3839 /* If we already have a pending stop signal for this process, don't
3840 send another. */
54a0b537 3841 if (lwp->stop_expected)
0d62e5e8 3842 {
ae13219e 3843 if (debug_threads)
87ce2a04 3844 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3845
0d62e5e8
DJ
3846 return;
3847 }
3848
3849 if (debug_threads)
87ce2a04 3850 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3851
d50171e4 3852 lwp->stop_expected = 1;
bd99dc85 3853 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3854}
3855
df3e4dbe
SM
3856static void
3857send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3858{
d86d4aaf 3859 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3860
7984d532
PA
3861 /* Ignore EXCEPT. */
3862 if (lwp == except)
df3e4dbe 3863 return;
7984d532 3864
02fc4de7 3865 if (lwp->stopped)
df3e4dbe 3866 return;
02fc4de7
PA
3867
3868 send_sigstop (lwp);
7984d532
PA
3869}
3870
3871/* Increment the suspend count of an LWP, and stop it, if not stopped
3872 yet. */
df3e4dbe
SM
3873static void
3874suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3875{
d86d4aaf 3876 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3877
3878 /* Ignore EXCEPT. */
3879 if (lwp == except)
df3e4dbe 3880 return;
7984d532 3881
863d01bd 3882 lwp_suspended_inc (lwp);
7984d532 3883
df3e4dbe 3884 send_sigstop (thread, except);
02fc4de7
PA
3885}
3886
95954743
PA
3887static void
3888mark_lwp_dead (struct lwp_info *lwp, int wstat)
3889{
95954743
PA
3890 /* Store the exit status for later. */
3891 lwp->status_pending_p = 1;
3892 lwp->status_pending = wstat;
3893
00db26fa
PA
3894 /* Store in waitstatus as well, as there's nothing else to process
3895 for this event. */
3896 if (WIFEXITED (wstat))
3897 {
3898 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3899 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3900 }
3901 else if (WIFSIGNALED (wstat))
3902 {
3903 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3904 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3905 }
3906
95954743
PA
3907 /* Prevent trying to stop it. */
3908 lwp->stopped = 1;
3909
3910 /* No further stops are expected from a dead lwp. */
3911 lwp->stop_expected = 0;
3912}
3913
00db26fa
PA
3914/* Return true if LWP has exited already, and has a pending exit event
3915 to report to GDB. */
3916
3917static int
3918lwp_is_marked_dead (struct lwp_info *lwp)
3919{
3920 return (lwp->status_pending_p
3921 && (WIFEXITED (lwp->status_pending)
3922 || WIFSIGNALED (lwp->status_pending)));
3923}
3924
d16f3f6c
TBA
3925void
3926linux_process_target::wait_for_sigstop ()
0d62e5e8 3927{
0bfdf32f 3928 struct thread_info *saved_thread;
95954743 3929 ptid_t saved_tid;
fa96cb38
PA
3930 int wstat;
3931 int ret;
0d62e5e8 3932
0bfdf32f
GB
3933 saved_thread = current_thread;
3934 if (saved_thread != NULL)
9c80ecd6 3935 saved_tid = saved_thread->id;
bd99dc85 3936 else
95954743 3937 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3938
d50171e4 3939 if (debug_threads)
fa96cb38 3940 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3941
fa96cb38
PA
3942 /* Passing NULL_PTID as filter indicates we want all events to be
3943 left pending. Eventually this returns when there are no
3944 unwaited-for children left. */
d16f3f6c 3945 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
fa96cb38 3946 gdb_assert (ret == -1);
0d62e5e8 3947
13d3d99b 3948 if (saved_thread == NULL || mythread_alive (saved_tid))
0bfdf32f 3949 current_thread = saved_thread;
0d62e5e8
DJ
3950 else
3951 {
3952 if (debug_threads)
87ce2a04 3953 debug_printf ("Previously current thread died.\n");
0d62e5e8 3954
f0db101d
PA
3955 /* We can't change the current inferior behind GDB's back,
3956 otherwise, a subsequent command may apply to the wrong
3957 process. */
3958 current_thread = NULL;
0d62e5e8
DJ
3959 }
3960}
3961
fcb056a5 3962/* Returns true if THREAD is stopped in a jump pad, and we can't
fa593d66
PA
3963 move it out, because we need to report the stop event to GDB. For
3964 example, if the user puts a breakpoint in the jump pad, it's
3965 because she wants to debug it. */
3966
fcb056a5
SM
3967static bool
3968stuck_in_jump_pad_callback (thread_info *thread)
fa593d66 3969{
d86d4aaf 3970 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 3971
863d01bd
PA
3972 if (lwp->suspended != 0)
3973 {
3974 internal_error (__FILE__, __LINE__,
3975 "LWP %ld is suspended, suspended=%d\n",
3976 lwpid_of (thread), lwp->suspended);
3977 }
fa593d66
PA
3978 gdb_assert (lwp->stopped);
3979
3980 /* Allow debugging the jump pad, gdb_collect, etc.. */
3981 return (supports_fast_tracepoints ()
58b4daa5 3982 && agent_loaded_p ()
fa593d66 3983 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3984 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 3985 || thread->last_resume_kind == resume_step)
229d26fc
SM
3986 && (linux_fast_tracepoint_collecting (lwp, NULL)
3987 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
3988}
3989
d16f3f6c
TBA
3990void
3991linux_process_target::move_out_of_jump_pad (thread_info *thread)
fa593d66 3992{
f0ce0d3a 3993 struct thread_info *saved_thread;
d86d4aaf 3994 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3995 int *wstat;
3996
863d01bd
PA
3997 if (lwp->suspended != 0)
3998 {
3999 internal_error (__FILE__, __LINE__,
4000 "LWP %ld is suspended, suspended=%d\n",
4001 lwpid_of (thread), lwp->suspended);
4002 }
fa593d66
PA
4003 gdb_assert (lwp->stopped);
4004
f0ce0d3a
PA
4005 /* For gdb_breakpoint_here. */
4006 saved_thread = current_thread;
4007 current_thread = thread;
4008
fa593d66
PA
4009 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4010
4011 /* Allow debugging the jump pad, gdb_collect, etc. */
4012 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 4013 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
4014 && thread->last_resume_kind != resume_step
4015 && maybe_move_out_of_jump_pad (lwp, wstat))
4016 {
4017 if (debug_threads)
87ce2a04 4018 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 4019 lwpid_of (thread));
fa593d66
PA
4020
4021 if (wstat)
4022 {
4023 lwp->status_pending_p = 0;
4024 enqueue_one_deferred_signal (lwp, wstat);
4025
4026 if (debug_threads)
87ce2a04
DE
4027 debug_printf ("Signal %d for LWP %ld deferred "
4028 "(in jump pad)\n",
d86d4aaf 4029 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
4030 }
4031
df95181f 4032 resume_one_lwp (lwp, 0, 0, NULL);
fa593d66
PA
4033 }
4034 else
863d01bd 4035 lwp_suspended_inc (lwp);
f0ce0d3a
PA
4036
4037 current_thread = saved_thread;
fa593d66
PA
4038}
4039
5a6b0a41
SM
4040static bool
4041lwp_running (thread_info *thread)
fa593d66 4042{
d86d4aaf 4043 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4044
00db26fa 4045 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4046 return false;
4047
4048 return !lwp->stopped;
fa593d66
PA
4049}
4050
d16f3f6c
TBA
4051void
4052linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
0d62e5e8 4053{
bde24c0a
PA
4054 /* Should not be called recursively. */
4055 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4056
87ce2a04
DE
4057 if (debug_threads)
4058 {
4059 debug_enter ();
4060 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4061 suspend ? "stop-and-suspend" : "stop",
4062 except != NULL
d86d4aaf 4063 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
4064 : "none");
4065 }
4066
bde24c0a
PA
4067 stopping_threads = (suspend
4068 ? STOPPING_AND_SUSPENDING_THREADS
4069 : STOPPING_THREADS);
7984d532
PA
4070
4071 if (suspend)
df3e4dbe
SM
4072 for_each_thread ([&] (thread_info *thread)
4073 {
4074 suspend_and_send_sigstop (thread, except);
4075 });
7984d532 4076 else
df3e4dbe
SM
4077 for_each_thread ([&] (thread_info *thread)
4078 {
4079 send_sigstop (thread, except);
4080 });
4081
fa96cb38 4082 wait_for_sigstop ();
bde24c0a 4083 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
4084
4085 if (debug_threads)
4086 {
4087 debug_printf ("stop_all_lwps done, setting stopping_threads "
4088 "back to !stopping\n");
4089 debug_exit ();
4090 }
0d62e5e8
DJ
4091}
4092
863d01bd
PA
4093/* Enqueue one signal in the chain of signals which need to be
4094 delivered to this process on next resume. */
4095
4096static void
4097enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4098{
8d749320 4099 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4100
863d01bd
PA
4101 p_sig->prev = lwp->pending_signals;
4102 p_sig->signal = signal;
4103 if (info == NULL)
4104 memset (&p_sig->info, 0, sizeof (siginfo_t));
4105 else
4106 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4107 lwp->pending_signals = p_sig;
4108}
4109
df95181f
TBA
4110void
4111linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
fa5308bd 4112{
984a2c04
YQ
4113 struct thread_info *thread = get_lwp_thread (lwp);
4114 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4115
4116 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4117
984a2c04 4118 current_thread = thread;
a0ff9e1a 4119 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
fa5308bd 4120
a0ff9e1a 4121 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4122 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4123}
4124
df95181f
TBA
4125int
4126linux_process_target::single_step (lwp_info* lwp)
7fe5e27e
AT
4127{
4128 int step = 0;
4129
4130 if (can_hardware_single_step ())
4131 {
4132 step = 1;
4133 }
4134 else if (can_software_single_step ())
4135 {
4136 install_software_single_step_breakpoints (lwp);
4137 step = 0;
4138 }
4139 else
4140 {
4141 if (debug_threads)
4142 debug_printf ("stepping is not implemented on this target");
4143 }
4144
4145 return step;
4146}
4147
35ac8b3e 4148/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4149 finish a fast tracepoint collect. Since signal can be delivered in
4150 the step-over, the program may go to signal handler and trap again
4151 after return from the signal handler. We can live with the spurious
4152 double traps. */
35ac8b3e
YQ
4153
4154static int
4155lwp_signal_can_be_delivered (struct lwp_info *lwp)
4156{
229d26fc
SM
4157 return (lwp->collecting_fast_tracepoint
4158 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4159}
4160
df95181f
TBA
4161void
4162linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4163 int signal, siginfo_t *info)
da6d8c04 4164{
d86d4aaf 4165 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 4166 struct thread_info *saved_thread;
82075af2 4167 int ptrace_request;
c06cbd92
YQ
4168 struct process_info *proc = get_thread_process (thread);
4169
4170 /* Note that target description may not be initialised
4171 (proc->tdesc == NULL) at this point because the program hasn't
4172 stopped at the first instruction yet. It means GDBserver skips
4173 the extra traps from the wrapper program (see option --wrapper).
4174 Code in this function that requires register access should be
4175 guarded by proc->tdesc == NULL or something else. */
0d62e5e8 4176
54a0b537 4177 if (lwp->stopped == 0)
0d62e5e8
DJ
4178 return;
4179
65706a29
PA
4180 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4181
229d26fc
SM
4182 fast_tpoint_collect_result fast_tp_collecting
4183 = lwp->collecting_fast_tracepoint;
fa593d66 4184
229d26fc
SM
4185 gdb_assert (!stabilizing_threads
4186 || (fast_tp_collecting
4187 != fast_tpoint_collect_result::not_collecting));
fa593d66 4188
219f2f23
PA
4189 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4190 user used the "jump" command, or "set $pc = foo"). */
c06cbd92 4191 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
219f2f23
PA
4192 {
4193 /* Collecting 'while-stepping' actions doesn't make sense
4194 anymore. */
d86d4aaf 4195 release_while_stepping_state_list (thread);
219f2f23
PA
4196 }
4197
0d62e5e8 4198 /* If we have pending signals or status, and a new signal, enqueue the
35ac8b3e
YQ
4199 signal. Also enqueue the signal if it can't be delivered to the
4200 inferior right now. */
0d62e5e8 4201 if (signal != 0
fa593d66
PA
4202 && (lwp->status_pending_p
4203 || lwp->pending_signals != NULL
35ac8b3e 4204 || !lwp_signal_can_be_delivered (lwp)))
94610ec4
YQ
4205 {
4206 enqueue_pending_signal (lwp, signal, info);
4207
4208 /* Postpone any pending signal. It was enqueued above. */
4209 signal = 0;
4210 }
0d62e5e8 4211
d50171e4
PA
4212 if (lwp->status_pending_p)
4213 {
4214 if (debug_threads)
94610ec4 4215 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
87ce2a04 4216 " has pending status\n",
94610ec4 4217 lwpid_of (thread), step ? "step" : "continue",
87ce2a04 4218 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
4219 return;
4220 }
0d62e5e8 4221
0bfdf32f
GB
4222 saved_thread = current_thread;
4223 current_thread = thread;
0d62e5e8 4224
0d62e5e8
DJ
4225 /* This bit needs some thinking about. If we get a signal that
4226 we must report while a single-step reinsert is still pending,
4227 we often end up resuming the thread. It might be better to
4228 (ew) allow a stack of pending events; then we could be sure that
4229 the reinsert happened right away and not lose any signals.
4230
4231 Making this stack would also shrink the window in which breakpoints are
54a0b537 4232 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
4233 complete correctness, so it won't solve that problem. It may be
4234 worthwhile just to solve this one, however. */
54a0b537 4235 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
4236 {
4237 if (debug_threads)
87ce2a04
DE
4238 debug_printf (" pending reinsert at 0x%s\n",
4239 paddress (lwp->bp_reinsert));
d50171e4 4240
85e00e85 4241 if (can_hardware_single_step ())
d50171e4 4242 {
229d26fc 4243 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
4244 {
4245 if (step == 0)
9986ba08 4246 warning ("BAD - reinserting but not stepping.");
fa593d66 4247 if (lwp->suspended)
9986ba08
PA
4248 warning ("BAD - reinserting and suspended(%d).",
4249 lwp->suspended);
fa593d66 4250 }
d50171e4 4251 }
f79b145d
YQ
4252
4253 step = maybe_hw_step (thread);
0d62e5e8
DJ
4254 }
4255
229d26fc 4256 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
fa593d66
PA
4257 {
4258 if (debug_threads)
87ce2a04
DE
4259 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4260 " (exit-jump-pad-bkpt)\n",
d86d4aaf 4261 lwpid_of (thread));
fa593d66 4262 }
229d26fc 4263 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
fa593d66
PA
4264 {
4265 if (debug_threads)
87ce2a04
DE
4266 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4267 " single-stepping\n",
d86d4aaf 4268 lwpid_of (thread));
fa593d66
PA
4269
4270 if (can_hardware_single_step ())
4271 step = 1;
4272 else
38e08fca
GB
4273 {
4274 internal_error (__FILE__, __LINE__,
4275 "moving out of jump pad single-stepping"
4276 " not implemented on this target");
4277 }
fa593d66
PA
4278 }
4279
219f2f23
PA
4280 /* If we have while-stepping actions in this thread set it stepping.
4281 If we have a signal to deliver, it may or may not be set to
4282 SIG_IGN, we don't know. Assume so, and allow collecting
4283 while-stepping into a signal handler. A possible smart thing to
4284 do would be to set an internal breakpoint at the signal return
4285 address, continue, and carry on catching this while-stepping
4286 action only when that breakpoint is hit. A future
4287 enhancement. */
7fe5e27e 4288 if (thread->while_stepping != NULL)
219f2f23
PA
4289 {
4290 if (debug_threads)
87ce2a04 4291 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 4292 lwpid_of (thread));
7fe5e27e
AT
4293
4294 step = single_step (lwp);
219f2f23
PA
4295 }
4296
bf9ae9d8 4297 if (proc->tdesc != NULL && low_supports_breakpoints ())
0d62e5e8 4298 {
0bfdf32f 4299 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be 4300
bf9ae9d8 4301 lwp->stop_pc = low_get_pc (regcache);
582511be
PA
4302
4303 if (debug_threads)
4304 {
4305 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4306 (long) lwp->stop_pc);
4307 }
0d62e5e8
DJ
4308 }
4309
35ac8b3e
YQ
4310 /* If we have pending signals, consume one if it can be delivered to
4311 the inferior. */
4312 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
0d62e5e8
DJ
4313 {
4314 struct pending_signals **p_sig;
4315
54a0b537 4316 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
4317 while ((*p_sig)->prev != NULL)
4318 p_sig = &(*p_sig)->prev;
4319
4320 signal = (*p_sig)->signal;
32ca6d61 4321 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 4322 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 4323 &(*p_sig)->info);
32ca6d61 4324
0d62e5e8
DJ
4325 free (*p_sig);
4326 *p_sig = NULL;
4327 }
4328
94610ec4
YQ
4329 if (debug_threads)
4330 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4331 lwpid_of (thread), step ? "step" : "continue", signal,
4332 lwp->stop_expected ? "expected" : "not expected");
4333
aa5ca48f
DE
4334 if (the_low_target.prepare_to_resume != NULL)
4335 the_low_target.prepare_to_resume (lwp);
4336
d86d4aaf 4337 regcache_invalidate_thread (thread);
da6d8c04 4338 errno = 0;
54a0b537 4339 lwp->stepping = step;
82075af2
JS
4340 if (step)
4341 ptrace_request = PTRACE_SINGLESTEP;
4342 else if (gdb_catching_syscalls_p (lwp))
4343 ptrace_request = PTRACE_SYSCALL;
4344 else
4345 ptrace_request = PTRACE_CONT;
4346 ptrace (ptrace_request,
4347 lwpid_of (thread),
b8e1b30e 4348 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
4349 /* Coerce to a uintptr_t first to avoid potential gcc warning
4350 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 4351 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 4352
0bfdf32f 4353 current_thread = saved_thread;
da6d8c04 4354 if (errno)
23f238d3
PA
4355 perror_with_name ("resuming thread");
4356
4357 /* Successfully resumed. Clear state that no longer makes sense,
4358 and mark the LWP as running. Must not do this before resuming
4359 otherwise if that fails other code will be confused. E.g., we'd
4360 later try to stop the LWP and hang forever waiting for a stop
4361 status. Note that we must not throw after this is cleared,
4362 otherwise handle_zombie_lwp_error would get confused. */
4363 lwp->stopped = 0;
4364 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4365}
4366
4367/* Called when we try to resume a stopped LWP and that errors out. If
4368 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4369 or about to become), discard the error, clear any pending status
4370 the LWP may have, and return true (we'll collect the exit status
4371 soon enough). Otherwise, return false. */
4372
4373static int
4374check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4375{
4376 struct thread_info *thread = get_lwp_thread (lp);
4377
4378 /* If we get an error after resuming the LWP successfully, we'd
4379 confuse !T state for the LWP being gone. */
4380 gdb_assert (lp->stopped);
4381
4382 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4383 because even if ptrace failed with ESRCH, the tracee may be "not
4384 yet fully dead", but already refusing ptrace requests. In that
4385 case the tracee has 'R (Running)' state for a little bit
4386 (observed in Linux 3.18). See also the note on ESRCH in the
4387 ptrace(2) man page. Instead, check whether the LWP has any state
4388 other than ptrace-stopped. */
4389
4390 /* Don't assume anything if /proc/PID/status can't be read. */
4391 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4392 {
23f238d3
PA
4393 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4394 lp->status_pending_p = 0;
4395 return 1;
4396 }
4397 return 0;
4398}
4399
df95181f
TBA
4400void
4401linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4402 siginfo_t *info)
23f238d3 4403{
a70b8144 4404 try
23f238d3 4405 {
df95181f 4406 resume_one_lwp_throw (lwp, step, signal, info);
23f238d3 4407 }
230d2906 4408 catch (const gdb_exception_error &ex)
23f238d3
PA
4409 {
4410 if (!check_ptrace_stopped_lwp_gone (lwp))
eedc3f4f 4411 throw;
3221518c 4412 }
da6d8c04
DJ
4413}
4414
5fdda392
SM
4415/* This function is called once per thread via for_each_thread.
4416 We look up which resume request applies to THREAD and mark it with a
4417 pointer to the appropriate resume request.
5544ad89
DJ
4418
4419 This algorithm is O(threads * resume elements), but resume elements
4420 is small (and will remain small at least until GDB supports thread
4421 suspension). */
ebcf782c 4422
5fdda392
SM
4423static void
4424linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
0d62e5e8 4425{
d86d4aaf 4426 struct lwp_info *lwp = get_thread_lwp (thread);
64386c31 4427
5fdda392 4428 for (int ndx = 0; ndx < n; ndx++)
95954743 4429 {
5fdda392 4430 ptid_t ptid = resume[ndx].thread;
d7e15655 4431 if (ptid == minus_one_ptid
9c80ecd6 4432 || ptid == thread->id
0c9070b3
YQ
4433 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4434 of PID'. */
e99b03dc 4435 || (ptid.pid () == pid_of (thread)
0e998d96 4436 && (ptid.is_pid ()
e38504b3 4437 || ptid.lwp () == -1)))
95954743 4438 {
5fdda392 4439 if (resume[ndx].kind == resume_stop
8336d594 4440 && thread->last_resume_kind == resume_stop)
d50171e4
PA
4441 {
4442 if (debug_threads)
87ce2a04
DE
4443 debug_printf ("already %s LWP %ld at GDB's request\n",
4444 (thread->last_status.kind
4445 == TARGET_WAITKIND_STOPPED)
4446 ? "stopped"
4447 : "stopping",
d86d4aaf 4448 lwpid_of (thread));
d50171e4
PA
4449
4450 continue;
4451 }
4452
5a04c4cf
PA
4453 /* Ignore (wildcard) resume requests for already-resumed
4454 threads. */
5fdda392 4455 if (resume[ndx].kind != resume_stop
5a04c4cf
PA
4456 && thread->last_resume_kind != resume_stop)
4457 {
4458 if (debug_threads)
4459 debug_printf ("already %s LWP %ld at GDB's request\n",
4460 (thread->last_resume_kind
4461 == resume_step)
4462 ? "stepping"
4463 : "continuing",
4464 lwpid_of (thread));
4465 continue;
4466 }
4467
4468 /* Don't let wildcard resumes resume fork children that GDB
4469 does not yet know are new fork children. */
4470 if (lwp->fork_relative != NULL)
4471 {
5a04c4cf
PA
4472 struct lwp_info *rel = lwp->fork_relative;
4473
4474 if (rel->status_pending_p
4475 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4476 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4477 {
4478 if (debug_threads)
4479 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4480 lwpid_of (thread));
4481 continue;
4482 }
4483 }
4484
4485 /* If the thread has a pending event that has already been
4486 reported to GDBserver core, but GDB has not pulled the
4487 event out of the vStopped queue yet, likewise, ignore the
4488 (wildcard) resume request. */
9c80ecd6 4489 if (in_queued_stop_replies (thread->id))
5a04c4cf
PA
4490 {
4491 if (debug_threads)
4492 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4493 lwpid_of (thread));
4494 continue;
4495 }
4496
5fdda392 4497 lwp->resume = &resume[ndx];
8336d594 4498 thread->last_resume_kind = lwp->resume->kind;
fa593d66 4499
c2d6af84
PA
4500 lwp->step_range_start = lwp->resume->step_range_start;
4501 lwp->step_range_end = lwp->resume->step_range_end;
4502
fa593d66
PA
4503 /* If we had a deferred signal to report, dequeue one now.
4504 This can happen if LWP gets more than one signal while
4505 trying to get out of a jump pad. */
4506 if (lwp->stopped
4507 && !lwp->status_pending_p
4508 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4509 {
4510 lwp->status_pending_p = 1;
4511
4512 if (debug_threads)
87ce2a04
DE
4513 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4514 "leaving status pending.\n",
d86d4aaf
DE
4515 WSTOPSIG (lwp->status_pending),
4516 lwpid_of (thread));
fa593d66
PA
4517 }
4518
5fdda392 4519 return;
95954743
PA
4520 }
4521 }
2bd7c093
PA
4522
4523 /* No resume action for this thread. */
4524 lwp->resume = NULL;
5544ad89
DJ
4525}
4526
df95181f
TBA
4527bool
4528linux_process_target::resume_status_pending (thread_info *thread)
5544ad89 4529{
d86d4aaf 4530 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4531
bd99dc85
PA
4532 /* LWPs which will not be resumed are not interesting, because
4533 we might not wait for them next time through linux_wait. */
2bd7c093 4534 if (lwp->resume == NULL)
25c28b4d 4535 return false;
64386c31 4536
df95181f 4537 return thread_still_has_status_pending (thread);
d50171e4
PA
4538}
4539
df95181f
TBA
4540bool
4541linux_process_target::thread_needs_step_over (thread_info *thread)
d50171e4 4542{
d86d4aaf 4543 struct lwp_info *lwp = get_thread_lwp (thread);
0bfdf32f 4544 struct thread_info *saved_thread;
d50171e4 4545 CORE_ADDR pc;
c06cbd92
YQ
4546 struct process_info *proc = get_thread_process (thread);
4547
4548 /* GDBserver is skipping the extra traps from the wrapper program,
4549 don't have to do step over. */
4550 if (proc->tdesc == NULL)
eca55aec 4551 return false;
d50171e4
PA
4552
4553 /* LWPs which will not be resumed are not interesting, because we
4554 might not wait for them next time through linux_wait. */
4555
4556 if (!lwp->stopped)
4557 {
4558 if (debug_threads)
87ce2a04 4559 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
d86d4aaf 4560 lwpid_of (thread));
eca55aec 4561 return false;
d50171e4
PA
4562 }
4563
8336d594 4564 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
4565 {
4566 if (debug_threads)
87ce2a04
DE
4567 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4568 " stopped\n",
d86d4aaf 4569 lwpid_of (thread));
eca55aec 4570 return false;
d50171e4
PA
4571 }
4572
7984d532
PA
4573 gdb_assert (lwp->suspended >= 0);
4574
4575 if (lwp->suspended)
4576 {
4577 if (debug_threads)
87ce2a04 4578 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
d86d4aaf 4579 lwpid_of (thread));
eca55aec 4580 return false;
7984d532
PA
4581 }
4582
bd99dc85 4583 if (lwp->status_pending_p)
d50171e4
PA
4584 {
4585 if (debug_threads)
87ce2a04
DE
4586 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4587 " status.\n",
d86d4aaf 4588 lwpid_of (thread));
eca55aec 4589 return false;
d50171e4
PA
4590 }
4591
4592 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4593 or we have. */
4594 pc = get_pc (lwp);
4595
4596 /* If the PC has changed since we stopped, then don't do anything,
4597 and let the breakpoint/tracepoint be hit. This happens if, for
4598 instance, GDB handled the decr_pc_after_break subtraction itself,
4599 GDB is OOL stepping this thread, or the user has issued a "jump"
4600 command, or poked thread's registers herself. */
4601 if (pc != lwp->stop_pc)
4602 {
4603 if (debug_threads)
87ce2a04
DE
4604 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4605 "Old stop_pc was 0x%s, PC is now 0x%s\n",
d86d4aaf
DE
4606 lwpid_of (thread),
4607 paddress (lwp->stop_pc), paddress (pc));
eca55aec 4608 return false;
d50171e4
PA
4609 }
4610
484b3c32
YQ
4611 /* On software single step target, resume the inferior with signal
4612 rather than stepping over. */
4613 if (can_software_single_step ()
4614 && lwp->pending_signals != NULL
4615 && lwp_signal_can_be_delivered (lwp))
4616 {
4617 if (debug_threads)
4618 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4619 " signals.\n",
4620 lwpid_of (thread));
4621
eca55aec 4622 return false;
484b3c32
YQ
4623 }
4624
0bfdf32f
GB
4625 saved_thread = current_thread;
4626 current_thread = thread;
d50171e4 4627
8b07ae33 4628 /* We can only step over breakpoints we know about. */
fa593d66 4629 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 4630 {
8b07ae33 4631 /* Don't step over a breakpoint that GDB expects to hit
9f3a5c85
LM
4632 though. If the condition is being evaluated on the target's side
4633 and it evaluate to false, step over this breakpoint as well. */
4634 if (gdb_breakpoint_here (pc)
d3ce09f5
SS
4635 && gdb_condition_true_at_breakpoint (pc)
4636 && gdb_no_commands_at_breakpoint (pc))
8b07ae33
PA
4637 {
4638 if (debug_threads)
87ce2a04
DE
4639 debug_printf ("Need step over [LWP %ld]? yes, but found"
4640 " GDB breakpoint at 0x%s; skipping step over\n",
d86d4aaf 4641 lwpid_of (thread), paddress (pc));
d50171e4 4642
0bfdf32f 4643 current_thread = saved_thread;
eca55aec 4644 return false;
8b07ae33
PA
4645 }
4646 else
4647 {
4648 if (debug_threads)
87ce2a04
DE
4649 debug_printf ("Need step over [LWP %ld]? yes, "
4650 "found breakpoint at 0x%s\n",
d86d4aaf 4651 lwpid_of (thread), paddress (pc));
d50171e4 4652
8b07ae33 4653 /* We've found an lwp that needs stepping over --- return 1 so
8f86d7aa 4654 that find_thread stops looking. */
0bfdf32f 4655 current_thread = saved_thread;
8b07ae33 4656
eca55aec 4657 return true;
8b07ae33 4658 }
d50171e4
PA
4659 }
4660
0bfdf32f 4661 current_thread = saved_thread;
d50171e4
PA
4662
4663 if (debug_threads)
87ce2a04
DE
4664 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4665 " at 0x%s\n",
d86d4aaf 4666 lwpid_of (thread), paddress (pc));
c6ecbae5 4667
eca55aec 4668 return false;
5544ad89
DJ
4669}
4670
/* Begin stepping LWP over the breakpoint it is stopped at: stop every
   other LWP, remove the breakpoint (and any fast tracepoint jump) at
   the current PC, and resume LWP for a single step.  The step-over is
   later completed by finish_step_over, which reinserts what was
   removed here.  */

void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* All other LWPs are frozen while LWP steps past the breakpoint, so
     no other thread can sneak through the uninserted breakpoint.  */
  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint operations below act on the current thread, so switch
     to LWP's thread and restore afterwards.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Remember where to reinsert the breakpoint once the step is done.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}
4717
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any single-step
   breakpoints we've set, on non hardware single-step targets.
   Returns 1 if a step-over was indeed in progress (and has now been
   finished), 0 if there was nothing to do.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  /* A nonzero bp_reinsert is the marker set by start_step_over.  */
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      /* Breakpoint operations act on the current thread.  */
      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return 1;
    }
  else
    return 0;
}
4759
/* If a step-over is in progress (step_over_bkpt is set), wait for the
   stepping LWP's event, finish the step-over, and unsuspend the other
   LWPs.  Used e.g. before detaching, when the step-over must not be
   left half-done.  */

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      /* The stepping LWP may have exited meanwhile, hence the NULL
	 check.  */
      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4786
/* Act on THREAD's previously recorded resume request (lwp->resume).
   A resume_stop request stops the thread (or re-queues a SIGSTOP);
   otherwise the thread is resumed unless it must be left stopped
   because it is suspended, has a pending status, or LEAVE_ALL_STOPPED
   is set.  In all cases any requested signal is queued for delivery.  */

void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request recorded for this thread - nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  /* The request has been consumed.  */
  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}
4891
/* Implement the resume target op: record the N resume requests in
   RESUME_INFO on each thread, then either resume the threads, leave
   them stopped because a pending status can be reported instead
   (all-stop only), or start a step-over if some thread is stopped at
   a breakpoint it needs to step past.  */

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* Attach each request in RESUME_INFO to the thread it names.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
4967
/* Resume THREAD, unless it is EXCEPT, already running, suspended,
   holding a pending status, or meant to stay stopped by the client.
   Decides whether to single-step (client requested stepping, or a
   step-over is being finished) or to continue.  */

void
linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the one LWP the caller wants left alone.  */
  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  /* The client asked for a stop and we've already reported it: keep
     the thread stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (can_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      /* A step-over is in flight (see start_step_over); keep
	 stepping until past the uninserted breakpoint.  */
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}
5064
df95181f
TBA
5065void
5066linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5067 lwp_info *except)
7984d532 5068{
d86d4aaf 5069 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5070
5071 if (lwp == except)
e2b44075 5072 return;
7984d532 5073
863d01bd 5074 lwp_suspended_decr (lwp);
7984d532 5075
e2b44075 5076 proceed_one_lwp (thread, except);
d50171e4
PA
5077}
5078
/* Resume all LWPs - unless some thread needs to step over a
   breakpoint first, in which case start that step-over instead (the
   remaining threads are resumed once it completes).  */

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
			 {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
5116
/* Resume all LWPs except EXCEPT.  If UNSUSPEND is nonzero, also
   decrement each LWP's suspend count before resuming it (used when
   undoing a stop_all_lwps that suspended the others).  */

void
linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
	debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
		      lwpid_of (get_lwp_thread (except)));
      else
	debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}
5147
58caa3dc
DJ
5148
5149#ifdef HAVE_LINUX_REGSETS
5150
1faeff08
MR
5151#define use_linux_regsets 1
5152
030031ee
PA
5153/* Returns true if REGSET has been disabled. */
5154
5155static int
5156regset_disabled (struct regsets_info *info, struct regset_info *regset)
5157{
5158 return (info->disabled_regsets != NULL
5159 && info->disabled_regsets[regset - info->regsets]);
5160}
5161
5162/* Disable REGSET. */
5163
5164static void
5165disable_regset (struct regsets_info *info, struct regset_info *regset)
5166{
5167 int dr_offset;
5168
5169 dr_offset = regset - info->regsets;
5170 if (info->disabled_regsets == NULL)
224c3ddb 5171 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5172 info->disabled_regsets[dr_offset] = 1;
5173}
5174
/* Fetch all registers covered by REGSETS_INFO from the current
   thread's LWP via ptrace regset requests, storing them into
   REGCACHE.  Returns 0 if a general-register regset was successfully
   transferred, 1 otherwise - the caller then falls back to fetching
   registers individually (see usr_fetch_inferior_registers).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* A nonzero NT_* note type means the modern PTRACE_GETREGSET
	 interface, which takes a struct iovec; otherwise pass the raw
	 buffer (legacy PTRACE_GETREGS-style requests).  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

      /* SPARC's ptrace takes its arguments in a different order.  */
#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5253
/* Write all registers covered by REGSETS_INFO from REGCACHE into the
   current thread's LWP via ptrace regset requests, using a
   read-modify-write cycle per regset.  Returns 0 if a
   general-register regset was written (or the process is gone), 1
   otherwise - the caller then falls back to storing registers
   individually.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Regsets without a fill_function are read-only.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      /* Nonzero NT_* note type selects the iovec-based
	 PTRACE_GETREGSET/SETREGSET interface.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

      /* SPARC's ptrace takes its arguments in a different order.  */
#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5342
1faeff08 5343#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5344
1faeff08 5345#define use_linux_regsets 0
3aee8918
PA
5346#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5347#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5348
58caa3dc 5349#endif
1faeff08
MR
5350
5351/* Return 1 if register REGNO is supported by one of the regset ptrace
5352 calls or 0 if it has to be transferred individually. */
5353
5354static int
3aee8918 5355linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5356{
5357 unsigned char mask = 1 << (regno % 8);
5358 size_t index = regno / 8;
5359
5360 return (use_linux_regsets
3aee8918
PA
5361 && (regs_info->regset_bitmap == NULL
5362 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5363}
5364
58caa3dc 5365#ifdef HAVE_LINUX_USRREGS
1faeff08 5366
5b3da067 5367static int
3aee8918 5368register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5369{
5370 int addr;
5371
3aee8918 5372 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5373 error ("Invalid register number %d.", regnum);
5374
3aee8918 5375 addr = usrregs->regmap[regnum];
1faeff08
MR
5376
5377 return addr;
5378}
5379
/* Fetch one register, REGNO, from the current thread's LWP using
   word-sized PTRACE_PEEKUSER reads at the register's USER-area
   offset, and supply it to REGCACHE.  Silently does nothing for
   registers the target cannot fetch this way; on a ptrace error the
   register is supplied as unavailable.  */

void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  /* A map entry of -1 means this register has no USER-area slot.  */
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  /* Let the arch hook massage the raw bytes if it has one.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5427
/* Store one register, REGNO, from REGCACHE into the current thread's
   LWP using word-sized PTRACE_POKEUSER writes at the register's
   USER-area offset.  Silently does nothing for registers the target
   cannot store this way; ESRCH (process already gone) is ignored,
   other ptrace errors raise an error.  */

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  /* A map entry of -1 means this register has no USER-area slot.  */
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace transfer
     words; the tail padding is zeroed below.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  /* Let the arch hook massage the bytes if it has one.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;


	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
daca57a7 5482#endif /* HAVE_LINUX_USRREGS */
1faeff08 5483
daca57a7
TBA
5484void
5485linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5486 regcache *regcache,
5487 int regno, int all)
1faeff08 5488{
daca57a7 5489#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5490 struct usrregs_info *usr = regs_info->usrregs;
5491
1faeff08
MR
5492 if (regno == -1)
5493 {
3aee8918
PA
5494 for (regno = 0; regno < usr->num_regs; regno++)
5495 if (all || !linux_register_in_regsets (regs_info, regno))
5496 fetch_register (usr, regcache, regno);
1faeff08
MR
5497 }
5498 else
3aee8918 5499 fetch_register (usr, regcache, regno);
daca57a7 5500#endif
1faeff08
MR
5501}
5502
daca57a7
TBA
5503void
5504linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5505 regcache *regcache,
5506 int regno, int all)
1faeff08 5507{
daca57a7 5508#ifdef HAVE_LINUX_USRREGS
3aee8918
PA
5509 struct usrregs_info *usr = regs_info->usrregs;
5510
1faeff08
MR
5511 if (regno == -1)
5512 {
3aee8918
PA
5513 for (regno = 0; regno < usr->num_regs; regno++)
5514 if (all || !linux_register_in_regsets (regs_info, regno))
5515 store_register (usr, regcache, regno);
1faeff08
MR
5516 }
5517 else
3aee8918 5518 store_register (usr, regcache, regno);
58caa3dc 5519#endif
daca57a7 5520}
1faeff08 5521
/* Implement the fetch_registers target op.  REGNO == -1 fetches all
   registers; otherwise just REGNO.  Tries, in order: the low target's
   special hook (low_fetch_register), regset transfers, and finally
   individual USER-area reads for whatever the regsets didn't cover.  */

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* Give the low target's hook first crack at every register.  */
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      /* ALL != 0 means the regsets missed the general registers, so
	 fetch everything via the USER area as a fallback.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A true return means the hook fully handled this register.  */
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5552
/* Implement the store_registers target op.  REGNO == -1 stores all
   registers; otherwise just REGNO.  Tries regset transfers first,
   then individual USER-area writes for whatever the regsets didn't
   cover.  */

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      /* ALL != 0 means the regsets missed the general registers, so
	 store everything via the USER area as a fallback.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5577
/* Default implementation of the low_fetch_register hook: no register
   needs special handling.  Returning false tells fetch_registers to
   fall back to the regset/USER-area paths; arch-specific targets
   presumably override this - TODO confirm against derived classes.  */

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}
da6d8c04 5583
/* A wrapper for the read_memory target op.  Forwards to the active
   target (the_target); returns whatever its read_memory returns.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}
5591
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success,
   or an errno value from the failing ptrace call.  Tries the fast
   /proc/PID/mem path first, then falls back to word-at-a-time
   PTRACE_PEEKTEXT.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  PTRACE_PEEKTEXT reports errors through
     errno, so clear it first and stop at the first failure.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer, trimming the leading
     slack from rounding ADDR down and clamping to LEN.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5680
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.
   Writes word-at-a-time with PTRACE_POKETEXT, so the partial words at
   either end are first read back and merged with the new data.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      /* Also preserve the bytes after the end of the written range.  */
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
2f2893d9 5772
2a31c7aa
TBA
/* Target op called when new symbols may be available.  When built with
   thread_db support, use the opportunity to initialize the thread_db
   layer for the current process; no-op if it is already set up or when
   built without USE_THREAD_DB.  */

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process -- nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5785
eb497a2a
TBA
/* Target op implementing remote interrupt (the user's ^C): interrupt
   the inferior's whole process group.  */

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  Negative pid addresses
     the process group led by signal_pid.  */
  ::kill (-signal_pid, SIGINT);
}
5793
eac215cc
TBA
/* The Linux target can always read the inferior's auxiliary vector
   (via /proc/PID/auxv; see read_auxv below).  */

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}
5799
aa691b87
RM
5800/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5801 to debugger memory starting at MYADDR. */
5802
eac215cc
TBA
5803int
5804linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5805 unsigned int len)
aa691b87
RM
5806{
5807 char filename[PATH_MAX];
5808 int fd, n;
0bfdf32f 5809 int pid = lwpid_of (current_thread);
aa691b87 5810
6cebaf6e 5811 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5812
5813 fd = open (filename, O_RDONLY);
5814 if (fd < 0)
5815 return -1;
5816
5817 if (offset != (CORE_ADDR) 0
5818 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5819 n = -1;
5820 else
5821 n = read (fd, myaddr, len);
5822
5823 close (fd);
5824
5825 return n;
5826}
5827
d993e290
PA
5828/* These breakpoint and watchpoint related wrapper functions simply
5829 pass on the function call if the target has registered a
5830 corresponding function. */
e013ee27 5831
a2b2297a
TBA
/* Report whether the low target can handle Z-packet breakpoints or
   watchpoints of kind Z_TYPE; false when the arch provides no hook.  */

bool
linux_process_target::supports_z_point_type (char z_type)
{
  return (the_low_target.supports_z_point_type != NULL
	  && the_low_target.supports_z_point_type (z_type));
}
5838
7e0bde70
TBA
5839int
5840linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5841 int size, raw_breakpoint *bp)
e013ee27 5842{
c8f4bfdd
YQ
5843 if (type == raw_bkpt_type_sw)
5844 return insert_memory_breakpoint (bp);
5845 else if (the_low_target.insert_point != NULL)
802e8e6d 5846 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5847 else
5848 /* Unsupported (see target.h). */
5849 return 1;
5850}
5851
7e0bde70
TBA
5852int
5853linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5854 int size, raw_breakpoint *bp)
e013ee27 5855{
c8f4bfdd
YQ
5856 if (type == raw_bkpt_type_sw)
5857 return remove_memory_breakpoint (bp);
5858 else if (the_low_target.remove_point != NULL)
802e8e6d 5859 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5860 else
5861 /* Unsupported (see target.h). */
5862 return 1;
5863}
5864
84320c4e 5865/* Implement the stopped_by_sw_breakpoint target_ops
3e572f71
PA
5866 method. */
5867
84320c4e
TBA
5868bool
5869linux_process_target::stopped_by_sw_breakpoint ()
3e572f71
PA
5870{
5871 struct lwp_info *lwp = get_thread_lwp (current_thread);
5872
5873 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5874}
5875
/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  True exactly when siginfo-based SIGTRAP decoding is
   available on this platform.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5884
93fe88b2 5885/* Implement the stopped_by_hw_breakpoint target_ops
3e572f71
PA
5886 method. */
5887
93fe88b2
TBA
5888bool
5889linux_process_target::stopped_by_hw_breakpoint ()
3e572f71
PA
5890{
5891 struct lwp_info *lwp = get_thread_lwp (current_thread);
5892
5893 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5894}
5895
/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  True exactly when siginfo-based SIGTRAP decoding is
   available on this platform.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}
5904
/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return can_hardware_single_step ();
}
5912
5303a34f
TBA
/* Implement the supports_software_single_step target_ops method.  */

bool
linux_process_target::supports_software_single_step ()
{
  return can_software_single_step ();
}
5918
6eeb5c55
TBA
5919bool
5920linux_process_target::stopped_by_watchpoint ()
e013ee27 5921{
0bfdf32f 5922 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5923
15c66dd6 5924 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5925}
5926
6eeb5c55
TBA
5927CORE_ADDR
5928linux_process_target::stopped_data_address ()
e013ee27 5929{
0bfdf32f 5930 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5931
5932 return lwp->stopped_data_address;
e013ee27
OF
5933}
5934
/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Report whether this build supports the read_offsets target op
   (compile-time decision; see read_offsets below).  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}
5949
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data load addresses via
   PTRACE_PEEKUSER and returns them through TEXT_P and DATA_P.  Returns
   1 on success, 0 on failure.  Only compiled in when the target defines
   PT_TEXT_ADDR and friends (SUPPORTS_READ_OFFSETS).  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER reports failure through errno.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
52fb6437 5990
6e3fd7e9
TBA
/* TLS address lookup is available only when built with thread_db
   support (compile-time decision).  */

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}
6000
/* Look up the address of THREAD's thread-local storage block for the
   module loaded at LOAD_MODULE, at OFFSET within the block; store the
   result in *ADDRESS.  Delegates to thread_db when available; returns
   -1 when built without thread_db (or thread_db's error code).  */

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}
6013
2d0795ee
TBA
/* The Linux target always supports the qXfer:osdata transfer.  */

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}
6019
/* Handle a qXfer:osdata request for table ANNEX: delegate to the
   shared Linux osdata code.  Note WRITEBUF is ignored -- osdata is
   read-only.  */

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
6028
d0722149
DE
6029/* Convert a native/host siginfo object, into/from the siginfo in the
6030 layout of the inferiors' architecture. */
6031
6032static void
8adce034 6033siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
d0722149
DE
6034{
6035 int done = 0;
6036
6037 if (the_low_target.siginfo_fixup != NULL)
6038 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6039
6040 /* If there was no callback, or the callback didn't do anything,
6041 then just do a straight memcpy. */
6042 if (!done)
6043 {
6044 if (direction == 1)
a5362b9a 6045 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6046 else
a5362b9a 6047 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6048 }
6049}
6050
d7abedf7
TBA
/* The Linux target always supports the qXfer:siginfo transfer.  */

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}
6056
/* Handle qXfer:siginfo: read (READBUF != NULL) or write the current
   LWP's pending signal information, LEN bytes at OFFSET.  Returns the
   number of bytes transferred, or -1 on error / no current thread.
   ANNEX is unused.  */

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the size of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6107
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Runs in signal context, so it only uses
   async-signal-safe operations.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno: the interrupted code may be inspecting it.  */
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6133
/* The Linux target always supports non-stop mode.  */

bool
linux_process_target::supports_non_stop ()
{
  return true;
}
6139
0dc587d4
TBA
/* Enable or disable async (non-stop) event notification.  When
   enabling, create the self-pipe that the SIGCHLD handler uses to wake
   the event loop; when disabling, tear it down.  Returns the previous
   async state.  SIGCHLD is blocked around the transition so the
   handler never sees a half-set-up pipe.  */

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Pipe creation failed: stay in the previous state.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6194
0dc587d4
TBA
6195int
6196linux_process_target::start_non_stop (bool nonstop)
bd99dc85
PA
6197{
6198 /* Register or unregister from event-loop accordingly. */
0dc587d4 6199 target_async (nonstop);
aa96c426 6200
0dc587d4 6201 if (target_is_async_p () != (nonstop != false))
aa96c426
GB
6202 return -1;
6203
bd99dc85
PA
6204 return 0;
6205}
6206
652aef77
TBA
/* The Linux target always supports debugging multiple processes.  */

bool
linux_process_target::supports_multi_process ()
{
  return true;
}
6212
/* Check if fork events are supported (requires kernel-side fork
   tracing via ptrace).  */

bool
linux_process_target::supports_fork_events ()
{
  return linux_supports_tracefork ();
}
6220
/* Check if vfork events are supported (same kernel capability as fork
   tracing).  */

bool
linux_process_target::supports_vfork_events ()
{
  return linux_supports_tracefork ();
}
6228
/* Check if exec events are supported (requires kernel-side exec
   tracing via ptrace).  */

bool
linux_process_target::supports_exec_events ()
{
  return linux_supports_traceexec ();
}
6236
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flags are actually applied later, once it reports the
	     stop.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
6266
55cf3021
TBA
/* Handle a 'monitor' command from GDB: forward to thread_db when
   built with it.  Returns 0 (command not handled) otherwise.  */

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}
6276
95a45fc1
TBA
/* Return the CPU core last used by thread PTID, via the shared Linux
   /proc-based helper.  */

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}
6282
c756403b
TBA
/* Address-space randomization can be disabled only when personality(2)
   support was detected at build time.  */

bool
linux_process_target::supports_disable_randomization ()
{
#ifdef HAVE_PERSONALITY
  return true;
#else
  return false;
#endif
}
efcbbd14 6292
c0245cb9
TBA
/* The Linux target always supports the in-process agent.  */

bool
linux_process_target::supports_agent ()
{
  return true;
}
6298
2526e0cd
TBA
6299bool
6300linux_process_target::supports_range_stepping ()
c2d6af84 6301{
c3805894 6302 if (can_software_single_step ())
2526e0cd 6303 return true;
c2d6af84 6304 if (*the_low_target.supports_range_stepping == NULL)
2526e0cd 6305 return false;
c2d6af84
PA
6306
6307 return (*the_low_target.supports_range_stepping) ();
6308}
6309
8247b823
TBA
/* The Linux target can always map a pid to its executable path.  */

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}
6315
/* Return the path of the executable of process PID, via the shared
   /proc-based helper.  */

char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}
6321
c9b7b804
TBA
/* The Linux target always supports per-process filesystem access
   (mount-namespace aware vFile operations).  */

bool
linux_process_target::supports_multifs ()
{
  return true;
}
6327
/* Open FILENAME in process PID's mount namespace (close-on-exec).  */

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}
6334
/* Unlink FILENAME in process PID's mount namespace.  */

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}
6340
/* Read the target of symlink FILENAME in process PID's mount
   namespace into BUF (at most BUFSIZ bytes).  */

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}
6347
723b724b 6348#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
78d85199
YQ
/* Layout of one loadable segment as reported by the kernel's loadmap
   ptrace request (PT_GETDSBT or PTRACE_GETFDPIC).  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
/* Loadmap layout for DSBT (e.g. TI C6x) targets.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
/* Loadmap layout for FDPIC targets.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif
78d85199 6389
9da41fda
TBA
/* Loadmap reading is available on this build (we are inside the
   PT_GETDSBT/PTRACE_GETFDPIC conditional).  */

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}
6395
/* Read up to LEN bytes at OFFSET of the load map named by ANNEX
   ("exec" or "interp") into MYADDR.  The kernel hands back a pointer
   to its own copy of the map via ptrace.  Returns the number of bytes
   copied, or -1 on error.  */

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  /* Clamp the copy to the part of the map that remains past OFFSET.  */
  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
723b724b 6428#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6429
0df28b1b
TBA
/* Let the arch-specific code inspect the COUNT features GDB announced
   in qSupported; no-op if the low target has no hook.  */

void
linux_process_target::process_qsupported (char **features, int count)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (features, count);
}
6436
bc8d3ae4
TBA
/* Syscall catchpoints need both an arch hook to decode syscall trap
   info and kernel support for PTRACE_O_TRACESYSGOOD.  */

bool
linux_process_target::supports_catch_syscall ()
{
  return (the_low_target.get_syscall_trapinfo != NULL
	  && linux_supports_tracesysgood ());
}
6443
d633e831
TBA
6444int
6445linux_process_target::get_ipa_tdesc_idx ()
ae91f625
MK
6446{
6447 if (the_low_target.get_ipa_tdesc_idx == NULL)
6448 return 0;
6449
6450 return (*the_low_target.get_ipa_tdesc_idx) ();
6451}
6452
290732bf
TBA
6453bool
6454linux_process_target::supports_tracepoints ()
219f2f23
PA
6455{
6456 if (*the_low_target.supports_tracepoints == NULL)
290732bf 6457 return false;
219f2f23
PA
6458
6459 return (*the_low_target.supports_tracepoints) ();
6460}
6461
770d8f6a
TBA
/* Return the PC as recorded in REGCACHE, or 0 when the low target
   does not expose PC access (no breakpoint support).  */

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}
6470
770d8f6a
TBA
/* Set the PC in REGCACHE to PC.  Only valid when the low target
   exposes PC access (asserted).  */

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}
6478
68119632
TBA
/* The Linux target can always report whether a thread is stopped.  */

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}
6484
/* Report whether THREAD's LWP is currently stopped.  */

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
6490
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded to stop_all_lwps.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}
6498
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded to unstop_all_lwps.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
6507
79b44087
TBA
/* Prepare for a memory access: in non-stop mode all threads must be
   paused first.  Returns 0 (always succeeds).  */

int
linux_process_target::prepare_to_access_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_pause_all (true);
  return 0;
}
6517
79b44087
TBA
/* Undo prepare_to_access_memory: resume all threads in non-stop
   mode.  */

void
linux_process_target::done_accessing_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_unpause_all (true);
}
6526
c23c9391
TBA
/* Fast tracepoints are supported exactly when the arch provides a
   jump-pad installer.  */

bool
linux_process_target::supports_fast_tracepoints ()
{
  return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
}
6532
/* Install a fast-tracepoint jump pad; forwards all arguments to the
   arch-specific installer (the caller must have checked
   supports_fast_tracepoints first).  Returns the installer's result.  */

int
linux_process_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
6549
345dafad
TBA
6550emit_ops *
6551linux_process_target::emit_ops ()
6a271cae
PA
6552{
6553 if (the_low_target.emit_ops != NULL)
6554 return (*the_low_target.emit_ops) ();
6555 else
6556 return NULL;
6557}
6558
c23c9391
TBA
/* Return the arch's minimum instruction length usable for a fast
   tracepoint.  NOTE(review): calls the hook unconditionally -- callers
   presumably check supports_fast_tracepoints first.  */

int
linux_process_target::get_min_fast_tracepoint_insn_len ()
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
6564
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.
   Scans /proc/PID/auxv for the AT_PHDR and AT_PHNUM entries, decoding
   them with the 64- or 32-bit auxv layout according to IS_ELF64.
   Returns 1 if the auxv file can't be opened, 2 if either entry is
   missing.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Read one auxv entry at a time until both values are found (or
     EOF/short read ends the scan).  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6630
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.
   Reads the inferior's program headers, computes the load relocation
   from the PT_PHDR entry (0 for non-PIE executables), and returns the
   relocated address of the PT_DYNAMIC segment.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and apply the relocation.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6704
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  /* Locate the inferior's dynamic section; without it there is no
     DT_DEBUG / DT_MIPS_RLD_MAP to read.  */
  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section one entry at a time until DT_NULL or a
     read failure.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Buffer for reading the pointer-sized rld_map value out of
	     the inferior.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the absolute address of a pointer to
	     the r_debug structure.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry's own
	     address, hence the added DYNAMIC_MEMADDR.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember the first DT_DEBUG seen, but keep scanning in case
	     a MIPS rld-map entry follows (it takes precedence).  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Buffer for reading the pointer-sized rld_map value out of
	     the inferior.  */
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP holds the absolute address of a pointer to
	     the r_debug structure.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry's own
	     address, hence the added DYNAMIC_MEMADDR.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  /* Remember the first DT_DEBUG seen, but keep scanning in case
	     a MIPS rld-map entry follows (it takes precedence).  */
	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6806
6807/* Read one pointer from MEMADDR in the inferior. */
6808
6809static int
6810read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6811{
485f1ee4
PA
6812 int ret;
6813
6814 /* Go through a union so this works on either big or little endian
6815 hosts, when the inferior's pointer size is smaller than the size
6816 of CORE_ADDR. It is assumed the inferior's endianness is the
6817 same of the superior's. */
6818 union
6819 {
6820 CORE_ADDR core_addr;
6821 unsigned int ui;
6822 unsigned char uc;
6823 } addr;
6824
6825 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6826 if (ret == 0)
6827 {
6828 if (ptr_size == sizeof (CORE_ADDR))
6829 *ptr = addr.core_addr;
6830 else if (ptr_size == sizeof (unsigned int))
6831 *ptr = addr.ui;
6832 else
6833 gdb_assert_not_reached ("unhandled pointer size");
6834 }
6835 return ret;
2268b414
JK
6836}
6837
974387bb
TBA
/* Implementation of the target_ops method "supports_qxfer_libraries_svr4".
   The SVR4 library-list XML transfer is always available on Linux.  */

bool
linux_process_target::supports_qxfer_libraries_svr4 ()
{
  return true;
}
6843
2268b414
JK
/* Byte offsets of the fields of the inferior's `struct r_debug' and
   `struct link_map' (SVR4 dynamic-linker interface), parameterized so
   that both 32-bit and 64-bit layouts can be described.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6867
/* Construct qXfer:libraries-svr4:read reply.

   Builds an XML <library-list-svr4> document by walking the inferior's
   link_map chain starting from r_debug.r_map.  ANNEX may contain
   "start=ADDR" and/or "prev=ADDR" arguments to resume a walk from a
   given link_map entry.  Only reads are supported: returns -2 if
   WRITEBUF is non-NULL, -1 on error, otherwise the number of bytes
   copied into READBUF starting at OFFSET.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick the 32-bit or 64-bit layout based on the inferior's
     executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=ADDR;" pairs out of ANNEX; unrecognized names are
     skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  /* No explicit start address: find the head of the list through
     r_debug.r_map.  */
  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  /* Walk the link_map chain; stop on the first entry whose fields
     cannot be read.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      /* Sanity check: the entry's back link must point at the entry we
	 arrived from.  */
      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  /* Serve the slice [OFFSET, OFFSET+LEN) of the generated document.  */
  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
7057
9accd112
MM
7058#ifdef HAVE_LINUX_BTRACE
7059
79597bdd
TBA
/* See to_enable_btrace target method.  Delegates to the shared
   linux_enable_btrace in nat/.  */

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}
7066
969c39fb 7067/* See to_disable_btrace target method. */
9accd112 7068
79597bdd
TBA
7069int
7070linux_process_target::disable_btrace (btrace_target_info *tinfo)
969c39fb
MM
7071{
7072 enum btrace_error err;
7073
7074 err = linux_disable_btrace (tinfo);
7075 return (err == BTRACE_ERR_NONE ? 0 : -1);
7076}
7077
bc504a31 7078/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7079
7080static void
7081linux_low_encode_pt_config (struct buffer *buffer,
7082 const struct btrace_data_pt_config *config)
7083{
7084 buffer_grow_str (buffer, "<pt-config>\n");
7085
7086 switch (config->cpu.vendor)
7087 {
7088 case CV_INTEL:
7089 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7090 "model=\"%u\" stepping=\"%u\"/>\n",
7091 config->cpu.family, config->cpu.model,
7092 config->cpu.stepping);
7093 break;
7094
7095 default:
7096 break;
7097 }
7098
7099 buffer_grow_str (buffer, "</pt-config>\n");
7100}
7101
7102/* Encode a raw buffer. */
7103
7104static void
7105linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7106 unsigned int size)
7107{
7108 if (size == 0)
7109 return;
7110
268a13a5 7111 /* We use hex encoding - see gdbsupport/rsp-low.h. */
b20a6524
MM
7112 buffer_grow_str (buffer, "<raw>\n");
7113
7114 while (size-- > 0)
7115 {
7116 char elem[2];
7117
7118 elem[0] = tohex ((*data >> 4) & 0xf);
7119 elem[1] = tohex (*data++ & 0xf);
7120
7121 buffer_grow (buffer, elem, 2);
7122 }
7123
7124 buffer_grow_str (buffer, "</raw>\n");
7125}
7126
969c39fb
MM
7127/* See to_read_btrace target method. */
7128
79597bdd
TBA
7129int
7130linux_process_target::read_btrace (btrace_target_info *tinfo,
7131 buffer *buffer,
7132 enum btrace_read_type type)
9accd112 7133{
734b0e4b 7134 struct btrace_data btrace;
969c39fb 7135 enum btrace_error err;
9accd112 7136
969c39fb
MM
7137 err = linux_read_btrace (&btrace, tinfo, type);
7138 if (err != BTRACE_ERR_NONE)
7139 {
7140 if (err == BTRACE_ERR_OVERFLOW)
7141 buffer_grow_str0 (buffer, "E.Overflow.");
7142 else
7143 buffer_grow_str0 (buffer, "E.Generic Error.");
7144
8dcc53b3 7145 return -1;
969c39fb 7146 }
9accd112 7147
734b0e4b
MM
7148 switch (btrace.format)
7149 {
7150 case BTRACE_FORMAT_NONE:
7151 buffer_grow_str0 (buffer, "E.No Trace.");
8dcc53b3 7152 return -1;
734b0e4b
MM
7153
7154 case BTRACE_FORMAT_BTS:
7155 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7156 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
9accd112 7157
46f29a9a 7158 for (const btrace_block &block : *btrace.variant.bts.blocks)
734b0e4b 7159 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
46f29a9a 7160 paddress (block.begin), paddress (block.end));
9accd112 7161
734b0e4b
MM
7162 buffer_grow_str0 (buffer, "</btrace>\n");
7163 break;
7164
b20a6524
MM
7165 case BTRACE_FORMAT_PT:
7166 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7167 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7168 buffer_grow_str (buffer, "<pt>\n");
7169
7170 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
9accd112 7171
b20a6524
MM
7172 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7173 btrace.variant.pt.size);
7174
7175 buffer_grow_str (buffer, "</pt>\n");
7176 buffer_grow_str0 (buffer, "</btrace>\n");
7177 break;
7178
7179 default:
7180 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
8dcc53b3 7181 return -1;
734b0e4b 7182 }
969c39fb
MM
7183
7184 return 0;
9accd112 7185}
f4abbc16
MM
7186
/* See to_btrace_conf target method.

   Render the branch-trace configuration of TINFO into BUFFER as a
   <btrace-conf> XML document.  Always returns 0.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  /* No recording configured: emit an empty document body.  */
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
9accd112
MM
7223#endif /* HAVE_LINUX_BTRACE */
7224
7b669087
GB
/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  /* In gdbserver the current LWP is simply the current thread.  */
  return ptid_of (current_thread);
}
7232
dd373349
AT
7233/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7234
d367006f
TBA
7235int
7236linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
dd373349
AT
7237{
7238 if (the_low_target.breakpoint_kind_from_pc != NULL)
7239 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7240 else
52405d85 7241 return process_stratum_target::breakpoint_kind_from_pc (pcptr);
dd373349
AT
7242}
7243
7244/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7245
d367006f
TBA
7246const gdb_byte *
7247linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
dd373349
AT
7248{
7249 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7250
7251 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7252}
7253
769ef81f
AT
7254/* Implementation of the target_ops method
7255 "breakpoint_kind_from_current_state". */
7256
d367006f
TBA
7257int
7258linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
769ef81f
AT
7259{
7260 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7261 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7262 else
d367006f 7263 return breakpoint_kind_from_pc (pcptr);
769ef81f
AT
7264}
7265
7f63b89b
TBA
/* Return the name of thread THREAD, as obtained from the /proc-based
   linux_proc_tid_get_name helper.  */

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}
7271
#if USE_THREAD_DB
/* Fetch the thread-library handle for PTID via libthread_db; stores
   the handle and its length through HANDLE/HANDLE_LEN and returns
   whether the lookup succeeded.  */

bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif
7280
276d4552
YQ
7281/* Default implementation of linux_target_ops method "set_pc" for
7282 32-bit pc register which is literally named "pc". */
7283
7284void
7285linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7286{
7287 uint32_t newpc = pc;
7288
7289 supply_register_by_name (regcache, "pc", &newpc);
7290}
7291
7292/* Default implementation of linux_target_ops method "get_pc" for
7293 32-bit pc register which is literally named "pc". */
7294
7295CORE_ADDR
7296linux_get_pc_32bit (struct regcache *regcache)
7297{
7298 uint32_t pc;
7299
7300 collect_register_by_name (regcache, "pc", &pc);
7301 if (debug_threads)
7302 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7303 return pc;
7304}
7305
6f69e520
YQ
7306/* Default implementation of linux_target_ops method "set_pc" for
7307 64-bit pc register which is literally named "pc". */
7308
7309void
7310linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7311{
7312 uint64_t newpc = pc;
7313
7314 supply_register_by_name (regcache, "pc", &newpc);
7315}
7316
7317/* Default implementation of linux_target_ops method "get_pc" for
7318 64-bit pc register which is literally named "pc". */
7319
7320CORE_ADDR
7321linux_get_pc_64bit (struct regcache *regcache)
7322{
7323 uint64_t pc;
7324
7325 collect_register_by_name (regcache, "pc", &pc);
7326 if (debug_threads)
7327 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7328 return pc;
7329}
7330
0570503d 7331/* See linux-low.h. */
974c89e0 7332
0570503d
PFC
7333int
7334linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7335{
7336 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7337 int offset = 0;
7338
7339 gdb_assert (wordsize == 4 || wordsize == 8);
7340
52405d85 7341 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
974c89e0
AH
7342 {
7343 if (wordsize == 4)
7344 {
0570503d 7345 uint32_t *data_p = (uint32_t *) data;
974c89e0 7346 if (data_p[0] == match)
0570503d
PFC
7347 {
7348 *valp = data_p[1];
7349 return 1;
7350 }
974c89e0
AH
7351 }
7352 else
7353 {
0570503d 7354 uint64_t *data_p = (uint64_t *) data;
974c89e0 7355 if (data_p[0] == match)
0570503d
PFC
7356 {
7357 *valp = data_p[1];
7358 return 1;
7359 }
974c89e0
AH
7360 }
7361
7362 offset += 2 * wordsize;
7363 }
7364
7365 return 0;
7366}
7367
7368/* See linux-low.h. */
7369
7370CORE_ADDR
7371linux_get_hwcap (int wordsize)
7372{
0570503d
PFC
7373 CORE_ADDR hwcap = 0;
7374 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7375 return hwcap;
974c89e0
AH
7376}
7377
7378/* See linux-low.h. */
7379
7380CORE_ADDR
7381linux_get_hwcap2 (int wordsize)
7382{
0570503d
PFC
7383 CORE_ADDR hwcap2 = 0;
7384 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7385 return hwcap2;
974c89e0 7386}
6f69e520 7387
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regsets table (terminated by a negative
   size) and record the count in INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
7398
da6d8c04
DJ
/* One-time initialization of the Linux low-level target: install the
   target vector, emit any ptrace/procfs capability warnings, set up
   the SIGCHLD handler, and run architecture-specific setup.  */

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  /* Make the_linux_target the active target before anything else may
     consult it.  */
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  /* SA_RESTART so interrupted syscalls are resumed rather than failing
     with EINTR when a child changes state.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}
This page took 2.133758 seconds and 4 git commands to generate.