Rename gdb exception types
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
42a4f53d 2 Copyright (C) 1995-2019 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
0747795c 22#include "common/agent.h"
de0d863e 23#include "tdesc.h"
0747795c
TT
24#include "common/rsp-low.h"
25#include "common/signals-state-save-restore.h"
96d7229d
LM
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
0747795c 28#include "common/gdb_wait.h"
5826e159 29#include "nat/gdb_ptrace.h"
125f8a3d
GB
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
8cc73a39 32#include "nat/linux-personality.h"
da6d8c04
DJ
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
0a30fbc4 36#include <unistd.h>
fd500816 37#include <sys/syscall.h>
f9387fc3 38#include <sched.h>
07e059b5
VP
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
53ce3c39 43#include <sys/stat.h>
efcbbd14 44#include <sys/vfs.h>
1570b33e 45#include <sys/uio.h>
0747795c 46#include "common/filestuff.h"
c144c7a0 47#include "tracepoint.h"
533b0600 48#include "hostio.h"
276d4552 49#include <inttypes.h>
0747795c 50#include "common/common-inferior.h"
2090129c 51#include "nat/fork-inferior.h"
0747795c 52#include "common/environ.h"
8ce47547 53#include "common/scoped_restore.h"
957f3f49
DE
54#ifndef ELFMAG0
55/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59#include <elf.h>
60#endif
14d2069a 61#include "nat/linux-namespaces.h"
efcbbd14
UW
62
63#ifndef SPUFS_MAGIC
64#define SPUFS_MAGIC 0x23c9b64e
65#endif
da6d8c04 66
03583c20
UW
67#ifdef HAVE_PERSONALITY
68# include <sys/personality.h>
69# if !HAVE_DECL_ADDR_NO_RANDOMIZE
70# define ADDR_NO_RANDOMIZE 0x0040000
71# endif
72#endif
73
fd462a61
DJ
74#ifndef O_LARGEFILE
75#define O_LARGEFILE 0
76#endif
1a981360 77
69f4c9cc
AH
78#ifndef AT_HWCAP2
79#define AT_HWCAP2 26
80#endif
81
db0dfaa0
LM
82/* Some targets did not define these ptrace constants from the start,
83 so gdbserver defines them locally here. In the future, these may
84 be removed after they are added to asm/ptrace.h. */
85#if !(defined(PT_TEXT_ADDR) \
86 || defined(PT_DATA_ADDR) \
87 || defined(PT_TEXT_END_ADDR))
88#if defined(__mcoldfire__)
89/* These are still undefined in 3.10 kernels. */
90#define PT_TEXT_ADDR 49*4
91#define PT_DATA_ADDR 50*4
92#define PT_TEXT_END_ADDR 51*4
93/* BFIN already defines these since at least 2.6.32 kernels. */
94#elif defined(BFIN)
95#define PT_TEXT_ADDR 220
96#define PT_TEXT_END_ADDR 224
97#define PT_DATA_ADDR 228
98/* These are still undefined in 3.10 kernels. */
99#elif defined(__TMS320C6X__)
100#define PT_TEXT_ADDR (0x10000*4)
101#define PT_DATA_ADDR (0x10004*4)
102#define PT_TEXT_END_ADDR (0x10008*4)
103#endif
104#endif
105
9accd112 106#ifdef HAVE_LINUX_BTRACE
125f8a3d 107# include "nat/linux-btrace.h"
0747795c 108# include "common/btrace-common.h"
9accd112
MM
109#endif
110
8365dcf5
TJB
111#ifndef HAVE_ELF32_AUXV_T
112/* Copied from glibc's elf.h. */
113typedef struct
114{
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
119 /* We use to have pointer elements added here. We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
122 } a_un;
123} Elf32_auxv_t;
124#endif
125
126#ifndef HAVE_ELF64_AUXV_T
127/* Copied from glibc's elf.h. */
128typedef struct
129{
130 uint64_t a_type; /* Entry type */
131 union
132 {
133 uint64_t a_val; /* Integer value */
134 /* We use to have pointer elements added here. We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
137 } a_un;
138} Elf64_auxv_t;
139#endif
140
ded48a5e
YQ
141/* Does the current host support PTRACE_GETREGSET? */
142int have_ptrace_getregset = -1;
143
cff068da
GB
144/* LWP accessors. */
145
146/* See nat/linux-nat.h. */
147
148ptid_t
149ptid_of_lwp (struct lwp_info *lwp)
150{
151 return ptid_of (get_lwp_thread (lwp));
152}
153
/* See nat/linux-nat.h.  Store the architecture-specific per-LWP data
   INFO in LWP.  Ownership of INFO passes to LWP.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
162
/* See nat/linux-nat.h.  Return the architecture-specific per-LWP data
   of LWP (may be NULL if none was ever set).  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
170
/* See nat/linux-nat.h.  Return non-zero if LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
178
/* See nat/linux-nat.h.  Return the reason the LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
186
0e00e962
AA
/* See nat/linux-nat.h.  Return non-zero if LWP is single-stepping.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}
194
05044653
PA
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list above; NULL when the list is empty.  */
struct simple_pid_list *stopped_pids;
211
212/* Trivial list manipulation functions to keep track of a list of new
213 stopped processes. */
214
215static void
216add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
217{
8d749320 218 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
05044653
PA
219
220 new_pid->pid = pid;
221 new_pid->status = status;
222 new_pid->next = *listp;
223 *listp = new_pid;
224}
225
226static int
227pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
228{
229 struct simple_pid_list **p;
230
231 for (p = listp; *p != NULL; p = &(*p)->next)
232 if ((*p)->pid == pid)
233 {
234 struct simple_pid_list *next = (*p)->next;
235
236 *statusp = (*p)->status;
237 xfree (*p);
238 *p = next;
239 return 1;
240 }
241 return 0;
242}
24a09b5f 243
bde24c0a
PA
/* What kind of all-stop operation (if any) is in progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
258
259/* FIXME make into a target method? */
24a09b5f 260int using_threads = 1;
24a09b5f 261
fa593d66
PA
262/* True if we're presently stabilizing threads (moving them out of
263 jump pads). */
264static int stabilizing_threads;
265
2acc282a 266static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 267 int step, int signal, siginfo_t *info);
2bd7c093 268static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
269static void stop_all_lwps (int suspend, struct lwp_info *except);
270static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
f50bf8e5 271static void unsuspend_all_lwps (struct lwp_info *except);
fa96cb38
PA
272static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
273 int *wstat, int options);
95954743 274static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 275static struct lwp_info *add_lwp (ptid_t ptid);
94585166 276static void linux_mourn (struct process_info *process);
c35fafde 277static int linux_stopped_by_watchpoint (void);
95954743 278static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
00db26fa 279static int lwp_is_marked_dead (struct lwp_info *lwp);
d50171e4 280static void proceed_all_lwps (void);
d50171e4 281static int finish_step_over (struct lwp_info *lwp);
d50171e4 282static int kill_lwp (unsigned long lwpid, int signo);
863d01bd
PA
283static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
284static void complete_ongoing_step_over (void);
ece66d65 285static int linux_low_ptrace_options (int attached);
ced2dffb 286static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
e2b44075 287static void proceed_one_lwp (thread_info *thread, lwp_info *except);
d50171e4 288
582511be
PA
289/* When the event-loop is doing a step-over, this points at the thread
290 being stepped. */
291ptid_t step_over_bkpt;
292
7d00775e 293/* True if the low target can hardware single-step. */
d50171e4
PA
294
295static int
296can_hardware_single_step (void)
297{
7d00775e
AT
298 if (the_low_target.supports_hardware_single_step != NULL)
299 return the_low_target.supports_hardware_single_step ();
300 else
301 return 0;
302}
303
/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}
312
/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}
0d62e5e8 321
fa593d66
PA
/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
331
c2d6af84
PA
332/* True if LWP is stopped in its stepping range. */
333
334static int
335lwp_in_step_range (struct lwp_info *lwp)
336{
337 CORE_ADDR pc = lwp->stop_pc;
338
339 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
340}
341
0d62e5e8
DJ
/* A signal reported for an LWP, queued for later delivery.  */

struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* The full siginfo associated with SIGNAL.  */
  siginfo_t info;
  /* Link to the previously queued entry.  */
  struct pending_signals *prev;
};
611cb4a5 348
bd99dc85
PA
349/* The read/write ends of the pipe registered as waitable file in the
350 event loop. */
351static int linux_event_pipe[2] = { -1, -1 };
352
353/* True if we're currently in async mode. */
354#define target_is_async_p() (linux_event_pipe[0] != -1)
355
02fc4de7 356static void send_sigstop (struct lwp_info *lwp);
fa96cb38 357static void wait_for_sigstop (void);
bd99dc85 358
d0722149
DE
/* Return non-zero if HEADER is a 64-bit ELF file.  Store the machine
   in *MACHINE: EM_NONE and -1 are returned if the magic bytes do not
   match; otherwise *MACHINE is HEADER's e_machine and the result is
   1 for ELFCLASS64, 0 otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] != ELFMAG0
      || header->e_ident[EI_MAG1] != ELFMAG1
      || header->e_ident[EI_MAG2] != ELFMAG2
      || header->e_ident[EI_MAG3] != ELFMAG3)
    {
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
376
377/* Return non-zero if FILE is a 64-bit ELF file,
378 zero if the file is not a 64-bit ELF file,
379 and -1 if the file is not accessible or doesn't exist. */
380
be07f1a2 381static int
214d508e 382elf_64_file_p (const char *file, unsigned int *machine)
d0722149 383{
957f3f49 384 Elf64_Ehdr header;
d0722149
DE
385 int fd;
386
387 fd = open (file, O_RDONLY);
388 if (fd < 0)
389 return -1;
390
391 if (read (fd, &header, sizeof (header)) != sizeof (header))
392 {
393 close (fd);
394 return 0;
395 }
396 close (fd);
397
214d508e 398 return elf_64_header_p (&header, machine);
d0722149
DE
399}
400
be07f1a2
PA
401/* Accepts an integer PID; Returns true if the executable PID is
402 running is a 64-bit ELF file.. */
403
404int
214d508e 405linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 406{
d8d2a3ee 407 char file[PATH_MAX];
be07f1a2
PA
408
409 sprintf (file, "/proc/%d/exe", pid);
214d508e 410 return elf_64_file_p (file, machine);
be07f1a2
PA
411}
412
bd99dc85
PA
/* Remove LWP's thread from the program's thread list and release the
   LWP's storage, including its arch-specific data.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  /* Let the arch-specific code free lwp->arch_private; targets that
     never allocate it leave delete_thread NULL.  */
  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}
430
95954743
PA
/* Add a process to the common process list, and set its private
   data.  ATTACHED is non-zero if we attached to an already-running
   process rather than spawning it ourselves.  Returns the new
   process_info.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  /* Give the target a chance to allocate arch-specific process
     state.  */
  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}
447
582511be
PA
448static CORE_ADDR get_pc (struct lwp_info *lwp);
449
/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}
457
458/* Call the target arch_setup function on THREAD. */
459
460static void
461linux_arch_setup_thread (struct thread_info *thread)
462{
463 struct thread_info *saved_thread;
464
465 saved_thread = current_thread;
466 current_thread = thread;
467
468 linux_arch_setup ();
469
470 current_thread = saved_thread;
471}
472
/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  WSTAT is the raw
   waitpid status carrying the PTRACE_EVENT_* code.  Returns 0 if the
   event should be reported to higher layers, 1 if it is consumed
   here.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
750
d50171e4
PA
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  Returns 0 if the target provides no get_pc
   callback.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  /* The regcache is looked up via the current thread, so temporarily
     switch to LWP's thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}
776
/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  /* The regcache is looked up via the current thread, so temporarily
     switch to LWP's thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}
805
e7ad2f14 806static int check_stopped_by_watchpoint (struct lwp_info *child);
0d62e5e8 807
e7ad2f14
PA
/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  /* Address a software breakpoint trap would have been reported at,
     accounting for the architecture's post-break PC adjustment.  */
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}
ce3a066d 948
/* Allocate a new lwp_info for PTID, register its thread with the
   thread list, and give the target a chance to attach arch-specific
   data.  Returns the new LWP.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}
611cb4a5 965
2090129c
SDJ
/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  Runs in the
   forked child, before exec.  */

static void
linux_ptrace_fun ()
{
  /* Ask to be traced by our parent (gdbserver).  */
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  /* Put the inferior in its own process group, so terminal signals
     aimed at gdbserver don't hit it.  */
  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
997
/* Start an inferior process and returns its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
		       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    /* Temporarily disable address space randomization if the client
       asked for it; the RAII object restores it on scope exit.  */
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* The ptrace event-reporting options can only be set once the child
     has stopped; remember to do it then.  */
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}
1032
ece66d65
JS
/* Implement the post_create_inferior target_ops method.  Called once
   the new inferior has reported its initial stop.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  /* Determine the inferior's architecture / target description.  */
  linux_arch_setup ();

  /* Now that the child is stopped, enable the ptrace event-reporting
     options deferred from linux_create_inferior.  */
  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}
1051
8784d563
PA
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
1136
8784d563
PA
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  /* The attach failed for some other reason; tell the user.  */
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
1181
500c1d85
PA
1182static void async_file_mark (void);
1183
e3deef73
LM
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0 on success; calls error () on failure.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      /* Undo the process registration done above.  */
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      /* If the first stop wasn't the expected SIGSTOP, keep the
	 status pending so it is reported to GDB later.  */
      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
1257
95954743 1258static int
e4eb0dec 1259last_thread_of_process_p (int pid)
95954743 1260{
e4eb0dec 1261 bool seen_one = false;
95954743 1262
da4ae14a 1263 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
95954743 1264 {
e4eb0dec
SM
1265 if (!seen_one)
1266 {
1267 /* This is the first thread of this process we see. */
1268 seen_one = true;
1269 return false;
1270 }
1271 else
1272 {
1273 /* This is the second thread of this process we see. */
1274 return true;
1275 }
1276 });
da6d8c04 1277
e4eb0dec 1278 return thread == NULL;
95954743
PA
1279}
1280
da84f473
PA
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      /* Save errno before debug_printf can clobber it.  */
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
1324
e76126e8
PA
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
1367
/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}
1391
/* Kill all LWPs of PROCESS (leader last), then mourn it.
   Always returns 0.  */

static int
linux_kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1426
9b224c5e
PA
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  Returns
   0 when there is no signal that should be delivered.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  /* Honor GDB's signal pass/nopass table, if we have one.  */
  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
1507
ced2dffb
PA
/* Detach from LWP.  Flushes registers, delivers any pending signal,
   PTRACE_DETACHes, and deletes the lwp.  Handles the lwp having
   become a zombie in the meantime.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}
1592
798a38e8 1593/* Callback for for_each_thread. Detaches from non-leader threads of a
ced2dffb
PA
1594 given process. */
1595
798a38e8
SM
1596static void
1597linux_detach_lwp_callback (thread_info *thread)
ced2dffb 1598{
ced2dffb
PA
1599 /* We don't actually detach from the thread group leader just yet.
1600 If the thread group exits, we must reap the zombie clone lwps
1601 before we're able to reap the leader. */
798a38e8
SM
1602 if (thread->id.pid () == thread->id.lwp ())
1603 return;
ced2dffb 1604
798a38e8 1605 lwp_info *lwp = get_thread_lwp (thread);
ced2dffb 1606 linux_detach_one_lwp (lwp);
6ad8ae5c
DJ
1607}
1608
/* Detach from PROCESS and all of its LWPs, then mourn it.
   Always returns 0.  */

static int
linux_detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to sucessfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
1647
/* Remove all LWPs that belong to process PROC from the lwp list, and
   free all per-process bookkeeping.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  /* Let the arch code free its private data; otherwise there must be
     none to free.  */
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
1675
444d6139 1676static void
d105de22 1677linux_join (int pid)
444d6139 1678{
444d6139
PA
1679 int status, ret;
1680
1681 do {
d105de22 1682 ret = my_waitpid (pid, &status, 0);
444d6139
PA
1683 if (WIFEXITED (status) || WIFSIGNALED (status))
1684 break;
1685 } while (ret != -1 || errno != ECHILD);
1686}
1687
6ad8ae5c 1688/* Return nonzero if the given thread is still alive. */
0d62e5e8 1689static int
95954743 1690linux_thread_alive (ptid_t ptid)
0d62e5e8 1691{
95954743
PA
1692 struct lwp_info *lwp = find_lwp_pid (ptid);
1693
1694 /* We assume we always know if a thread exits. If a whole process
1695 exited but we still haven't been able to report it to GDB, we'll
1696 hold on to the last lwp of the dead process. */
1697 if (lwp != NULL)
00db26fa 1698 return !lwp_is_marked_dead (lwp);
0d62e5e8
DJ
1699 else
1700 return 0;
1701}
1702
582511be
PA
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      /* get_pc and the breakpoint checks below operate on the
	 current thread; temporarily switch to this lwp's thread.  */
      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo-based stop reasons, re-check that the
	 breakpoint that explained the stop still exists.  */
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
1770
a681f9c9
PA
1771/* Returns true if LWP is resumed from the client's perspective. */
1772
1773static int
1774lwp_resumed (struct lwp_info *lwp)
1775{
1776 struct thread_info *thread = get_lwp_thread (lwp);
1777
1778 if (thread->last_resume_kind != resume_stop)
1779 return 1;
1780
1781 /* Did gdb send us a `vCont;t', but we haven't reported the
1782 corresponding stop to gdb yet? If so, the thread is still
1783 resumed/running from gdb's perspective. */
1784 if (thread->last_resume_kind == resume_stop
1785 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1786 return 1;
1787
1788 return 0;
1789}
1790
83e1b6c1
SM
/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      /* The event is no longer interesting (e.g., a breakpoint that
	 is gone); quietly re-resume the lwp and discard the event.  */
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
1814
95954743
PA
1815struct lwp_info *
1816find_lwp_pid (ptid_t ptid)
1817{
da4ae14a 1818 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
454296a2
SM
1819 {
1820 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
da4ae14a 1821 return thr_arg->id.lwp () == lwp;
454296a2 1822 });
d86d4aaf
DE
1823
1824 if (thread == NULL)
1825 return NULL;
1826
9c80ecd6 1827 return get_thread_lwp (thread);
95954743
PA
1828}
1829
fa96cb38 1830/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1831
fa96cb38
PA
1832static int
1833num_lwps (int pid)
1834{
fa96cb38 1835 int count = 0;
0d62e5e8 1836
4d3bb80e
SM
1837 for_each_thread (pid, [&] (thread_info *thread)
1838 {
9c80ecd6 1839 count++;
4d3bb80e 1840 });
3aee8918 1841
fa96cb38
PA
1842 return count;
1843}
d61ddec4 1844
6d4ee8c6
GB
1845/* See nat/linux-nat.h. */
1846
1847struct lwp_info *
1848iterate_over_lwps (ptid_t filter,
d3a70e03 1849 gdb::function_view<iterate_over_lwps_ftype> callback)
6d4ee8c6 1850{
da4ae14a 1851 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
6d1e5673 1852 {
da4ae14a 1853 lwp_info *lwp = get_thread_lwp (thr_arg);
6d1e5673 1854
d3a70e03 1855 return callback (lwp);
6d1e5673 1856 });
6d4ee8c6 1857
9c80ecd6 1858 if (thread == NULL)
6d4ee8c6
GB
1859 return NULL;
1860
9c80ecd6 1861 return get_thread_lwp (thread);
6d4ee8c6
GB
1862}
1863
fa96cb38
PA
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	   available, or only the leader exited (not the whole
	   program).  In the latter case, we can't waitpid the
	   leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	   other than the leader exec'd.  On an exec, the Linux
	   kernel destroys all other threads (except the execing
	   one) in the thread group, and resets the execing thread's
	   tid to the tgid.  No exit notification is sent for the
	   execing thread -- from the ptracer's perspective, it
	   appears as though the execing thread just vanishes.
	   Until we reap all other threads except the leader and the
	   execing thread, the leader will be zombie, and the
	   execing thread will be in `D (disc sleep)'.  As soon as
	   all other threads are reaped, the execing thread changes
	   it's tid to the tgid, and the previous (zombie) leader
	   vanishes, giving place to the "new" leader.  We could try
	   distinguishing the exit and exec cases, by waiting once
	   more, and seeing if something comes out, but it doesn't
	   sound useful.  The previous leader _does_ go away, and
	   we'll re-add the new one once we see the exec event
	   (which is just the same as what would happen if the
	   previous leader did exit voluntarily before some other
	   thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
  });
}
c3adc08c 1925
a1385b7b
SM
1926/* Callback for `find_thread'. Returns the first LWP that is not
1927 stopped. */
d50171e4 1928
a1385b7b
SM
1929static bool
1930not_stopped_callback (thread_info *thread, ptid_t filter)
fa96cb38 1931{
a1385b7b
SM
1932 if (!thread->id.matches (filter))
1933 return false;
47c0c975 1934
a1385b7b 1935 lwp_info *lwp = get_thread_lwp (thread);
fa96cb38 1936
a1385b7b 1937 return !lwp->stopped;
0d62e5e8 1938}
611cb4a5 1939
863d01bd
PA
1940/* Increment LWP's suspend count. */
1941
1942static void
1943lwp_suspended_inc (struct lwp_info *lwp)
1944{
1945 lwp->suspended++;
1946
1947 if (debug_threads && lwp->suspended > 4)
1948 {
1949 struct thread_info *thread = get_lwp_thread (lwp);
1950
1951 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1952 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1953 }
1954}
1955
1956/* Decrement LWP's suspend count. */
1957
1958static void
1959lwp_suspended_decr (struct lwp_info *lwp)
1960{
1961 lwp->suspended--;
1962
1963 if (lwp->suspended < 0)
1964 {
1965 struct thread_info *thread = get_lwp_thread (lwp);
1966
1967 internal_error (__FILE__, __LINE__,
1968 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1969 lwp->suspended);
1970 }
1971}
1972
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  /* Handle tracepoint-related breakpoints (e.g. in the jump pads).  */
  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  /* Undo the temporary suspension above; count must balance back to
     zero.  */
  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
2022
229d26fc
SM
2023/* Convenience wrapper. Returns information about LWP's fast tracepoint
2024 collection status. */
fa593d66 2025
229d26fc 2026static fast_tpoint_collect_result
fa593d66
PA
2027linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2028 struct fast_tpoint_collect_status *status)
2029{
2030 CORE_ADDR thread_area;
d86d4aaf 2031 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
2032
2033 if (the_low_target.get_thread_area == NULL)
229d26fc 2034 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2035
2036 /* Get the thread area address. This is used to recognize which
2037 thread is which when tracing with the in-process agent library.
2038 We don't read anything from the address, and treat it as opaque;
2039 it's the address itself that we assume is unique per-thread. */
d86d4aaf 2040 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
229d26fc 2041 return fast_tpoint_collect_result::not_collecting;
fa593d66
PA
2042
2043 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2044}
2045
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.

   Return 1 if LWP is collecting a fast tracepoint and must be moved
   out of the jump pad before the event in WSTAT (may be NULL) can be
   reported; return 0 otherwise.  Temporarily switches current_thread
   to LWP's thread and restores it before returning.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  /* Only relevant when fast tracepoints are in use and the stop is
     not a SIGTRAP (SIGTRAPs are handled by the tracepoint code).  */
  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      /* Synchronous (fault) signals get special handling below;
	 everything else follows the "wait for the pad to finish"
	 path.  */
      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      /* Rewind the PC to the tracepoint address so GDB sees
		 the fault as occurring at the tracepoint.  */
	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      /* All threads must be stopped while a breakpoint is
		 removed.  */
	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
2174
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  WSTAT points at the wait status carrying the
   signal.  The siginfo is captured via PTRACE_GETSIGINFO so it can be
   re-injected faithfully when the signal is finally reported.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf (" Already queued %d\n",
		      sig->signal);

      debug_printf (" (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice) */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  /* Push a new node onto the head of the singly-linked list; `prev'
     points towards older entries.  */
  p_sig = XCNEW (struct pending_signals);
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);

  /* Capture the full siginfo now, while the kernel still has it.
     NOTE(review): the ptrace return value is not checked here;
     presumably failure leaves p_sig->info zeroed (XCNEW).  */
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
2233
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  Store it in *WSTAT (as a stop code), re-inject
   its saved siginfo via PTRACE_SETSIGINFO, and return 1.  Return 0 if
   the list is empty.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      /* New signals are pushed at the head, so walk to the tail to
	 dequeue the oldest signal first (FIFO order).  */
      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      /* si_signo == 0 means no siginfo was captured at enqueue time;
	 skip the re-injection in that case.  */
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf (" Still queued %d\n",
			  sig->signal);

	  debug_printf (" (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
2279
582511be
PA
2280/* Fetch the possibly triggered data watchpoint info and store it in
2281 CHILD.
d50171e4 2282
582511be
PA
2283 On some archs, like x86, that use debug registers to set
2284 watchpoints, it's possible that the way to know which watched
2285 address trapped, is to check the register that is used to select
2286 which address to watch. Problem is, between setting the watchpoint
2287 and reading back which data address trapped, the user may change
2288 the set of watchpoints, and, as a consequence, GDB changes the
2289 debug registers in the inferior. To avoid reading back a stale
2290 stopped-data-address when that happens, we cache in LP the fact
2291 that a watchpoint trapped, and the corresponding data address, as
2292 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2293 registers meanwhile, we have the cached data we can rely on. */
d50171e4 2294
582511be
PA
2295static int
2296check_stopped_by_watchpoint (struct lwp_info *child)
2297{
2298 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 2299 {
582511be 2300 struct thread_info *saved_thread;
d50171e4 2301
582511be
PA
2302 saved_thread = current_thread;
2303 current_thread = get_lwp_thread (child);
2304
2305 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2306 {
15c66dd6 2307 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2308
2309 if (the_low_target.stopped_data_address != NULL)
2310 child->stopped_data_address
2311 = the_low_target.stopped_data_address ();
2312 else
2313 child->stopped_data_address = 0;
d50171e4
PA
2314 }
2315
0bfdf32f 2316 current_thread = saved_thread;
d50171e4
PA
2317 }
2318
15c66dd6 2319 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2320}
2321
de0d863e
DB
2322/* Return the ptrace options that we want to try to enable. */
2323
2324static int
2325linux_low_ptrace_options (int attached)
2326{
c12a5089 2327 client_state &cs = get_client_state ();
de0d863e
DB
2328 int options = 0;
2329
2330 if (!attached)
2331 options |= PTRACE_O_EXITKILL;
2332
c12a5089 2333 if (cs.report_fork_events)
de0d863e
DB
2334 options |= PTRACE_O_TRACEFORK;
2335
c12a5089 2336 if (cs.report_vfork_events)
c269dbdb
DB
2337 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2338
c12a5089 2339 if (cs.report_exec_events)
94585166
DB
2340 options |= PTRACE_O_TRACEEXEC;
2341
82075af2
JS
2342 options |= PTRACE_O_TRACESYSGOOD;
2343
de0d863e
DB
2344 return options;
2345}
2346
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  LWPID/WSTAT are as returned by waitpid.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d"
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_t (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Since events are serialized to GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
      else
	{
	  delete_lwp (child);
	  return NULL;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      linux_arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return child;
	    }
	}
    }

  /* Enable the extended ptrace events on the first stop, if not done
     already.  */
  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      /* Syscall stops come in entry/return pairs; toggle between the
	 two states.  */
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return NULL;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return NULL;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  /* Leave the event pending; the caller decides when to report it.  */
  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
2560
f79b145d
YQ
2561/* Return true if THREAD is doing hardware single step. */
2562
2563static int
2564maybe_hw_step (struct thread_info *thread)
2565{
2566 if (can_hardware_single_step ())
2567 return 1;
2568 else
2569 {
3b9a79ef 2570 /* GDBserver must insert single-step breakpoint for software
f79b145d 2571 single step. */
3b9a79ef 2572 gdb_assert (has_single_step_breakpoints (thread));
f79b145d
YQ
2573 return 0;
2574 }
2575}
2576
/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  Intended
   as a for_each_thread callback.  */

static void
resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Only resume if the LWP is stopped, not suspended by us, has
     nothing pending to report, and the core isn't waiting on an
     event for it.  */
  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      /* Honor a pending single-step request, using hardware stepping
	 when available.  */
      if (thread->last_resume_kind == resume_step)
	step = maybe_hw_step (thread);

      if (debug_threads)
	debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
		      target_pid_to_str (ptid_of (thread)),
		      paddress (lp->stop_pc),
		      step);

      linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
2604
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      /* Wildcard or whole-process filter: pick an event LWP at random
	 to avoid starving any one thread.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (filter_ptid != null_ptid)
    {
      /* Waiting on a single, specific LWP.  */
      requested_child = find_lwp_pid (filter_ptid);

      /* A pending status on an LWP still collecting a fast
	 tracepoint cannot be reported yet; defer the signal and
	 resume the LWP so it can finish the collection.  */
      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  linux_resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp));
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  linux_low_filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread (resume_stopped_resumed_lwps);

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      /* Atomically unblock signals and wait; re-block before looping
	 back so the pending-status scan can't race with SIGCHLD.  */
      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  /* Restore the caller's signal mask before returning an event.  */
  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}
2811
/* Wait for an event from child(ren) PTID.  PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   was found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  /* Convenience wrapper: use PTID both as the set of LWPs to wait
     on and as the filter for which events to report.  */
  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}
2825
6bf5e0ba
PA
2826/* Select one LWP out of those that have events pending. */
2827
2828static void
2829select_event_lwp (struct lwp_info **orig_lp)
2830{
6bf5e0ba 2831 int random_selector;
582511be
PA
2832 struct thread_info *event_thread = NULL;
2833
2834 /* In all-stop, give preference to the LWP that is being
2835 single-stepped. There will be at most one, and it's the LWP that
2836 the core is most interested in. If we didn't do this, then we'd
2837 have to handle pending step SIGTRAPs somehow in case the core
2838 later continues the previously-stepped thread, otherwise we'd
2839 report the pending SIGTRAP, and the core, not having stepped the
2840 thread, wouldn't understand what the trap was for, and therefore
2841 would report it to the user as a random signal. */
2842 if (!non_stop)
6bf5e0ba 2843 {
39a64da5
SM
2844 event_thread = find_thread ([] (thread_info *thread)
2845 {
2846 lwp_info *lp = get_thread_lwp (thread);
2847
2848 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2849 && thread->last_resume_kind == resume_step
2850 && lp->status_pending_p);
2851 });
2852
582511be
PA
2853 if (event_thread != NULL)
2854 {
2855 if (debug_threads)
2856 debug_printf ("SEL: Select single-step %s\n",
2857 target_pid_to_str (ptid_of (event_thread)));
2858 }
6bf5e0ba 2859 }
582511be 2860 if (event_thread == NULL)
6bf5e0ba
PA
2861 {
2862 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2863 which have had events. */
6bf5e0ba 2864
b90fc188 2865 /* First see how many events we have. */
39a64da5
SM
2866 int num_events = 0;
2867 for_each_thread ([&] (thread_info *thread)
2868 {
2869 lwp_info *lp = get_thread_lwp (thread);
2870
2871 /* Count only resumed LWPs that have an event pending. */
2872 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2873 && lp->status_pending_p)
2874 num_events++;
2875 });
8bf3b159 2876 gdb_assert (num_events > 0);
6bf5e0ba 2877
b90fc188
PA
2878 /* Now randomly pick a LWP out of those that have had
2879 events. */
6bf5e0ba
PA
2880 random_selector = (int)
2881 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2882
2883 if (debug_threads && num_events > 1)
87ce2a04
DE
2884 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2885 num_events, random_selector);
6bf5e0ba 2886
39a64da5
SM
2887 event_thread = find_thread ([&] (thread_info *thread)
2888 {
2889 lwp_info *lp = get_thread_lwp (thread);
2890
2891 /* Select only resumed LWPs that have an event pending. */
2892 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2893 && lp->status_pending_p)
2894 if (random_selector-- == 0)
2895 return true;
2896
2897 return false;
2898 });
6bf5e0ba
PA
2899 }
2900
d86d4aaf 2901 if (event_thread != NULL)
6bf5e0ba 2902 {
d86d4aaf
DE
2903 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2904
6bf5e0ba
PA
2905 /* Switch the event LWP. */
2906 *orig_lp = event_lp;
2907 }
2908}
2909
7984d532
PA
2910/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2911 NULL. */
2912
2913static void
2914unsuspend_all_lwps (struct lwp_info *except)
2915{
139720c5
SM
2916 for_each_thread ([&] (thread_info *thread)
2917 {
2918 lwp_info *lwp = get_thread_lwp (thread);
2919
2920 if (lwp != except)
2921 lwp_suspended_decr (lwp);
2922 });
7984d532
PA
2923}
2924
9c80ecd6 2925static void move_out_of_jump_pad_callback (thread_info *thread);
fcb056a5 2926static bool stuck_in_jump_pad_callback (thread_info *thread);
5a6b0a41 2927static bool lwp_running (thread_info *thread);
fa593d66
PA
2928static ptid_t linux_wait_1 (ptid_t ptid,
2929 struct target_waitstatus *ourstatus,
2930 int target_options);
2931
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  /* If any thread is inextricably stuck in a jump pad, there is
     nothing we can safely do; bail out without touching anything.  */
  thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  /* Save the current thread; it is restored before returning, since
     the wait loop below changes it as events come in.  */
  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread (move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  Bump the suspend count so the LWP stays put
	     until we are done; unsuspend_all_lwps below undoes it.  */
	  lwp_suspended_inc (lwp);

	  /* Defer any interesting signal so it is re-delivered once
	     the thread is out of the jump pad.  */
	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  /* Sanity check: stabilization should have moved every thread out;
     report (in debug mode) if one got stuck anyway.  */
  if (debug_threads)
    {
      thread_stuck = find_thread (stuck_in_jump_pad_callback);

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}
3025
582511be
PA
3026/* Convenience function that is called when the kernel reports an
3027 event that is not passed out to GDB. */
3028
3029static ptid_t
3030ignore_event (struct target_waitstatus *ourstatus)
3031{
3032 /* If we got an event, there may still be others, as a single
3033 SIGCHLD can indicate more than one child stopped. This forces
3034 another target_wait call. */
3035 async_file_mark ();
3036
3037 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3038 return null_ptid;
3039}
3040
65706a29
PA
3041/* Convenience function that is called when the kernel reports an exit
3042 event. This decides whether to report the event to GDB as a
3043 process exit event, a thread exit event, or to suppress the
3044 event. */
3045
3046static ptid_t
3047filter_exit_event (struct lwp_info *event_child,
3048 struct target_waitstatus *ourstatus)
3049{
c12a5089 3050 client_state &cs = get_client_state ();
65706a29
PA
3051 struct thread_info *thread = get_lwp_thread (event_child);
3052 ptid_t ptid = ptid_of (thread);
3053
3054 if (!last_thread_of_process_p (pid_of (thread)))
3055 {
c12a5089 3056 if (cs.report_thread_events)
65706a29
PA
3057 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3058 else
3059 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3060
3061 delete_lwp (event_child);
3062 }
3063 return ptid;
3064}
3065
82075af2
JS
3066/* Returns 1 if GDB is interested in any event_child syscalls. */
3067
3068static int
3069gdb_catching_syscalls_p (struct lwp_info *event_child)
3070{
3071 struct thread_info *thread = get_lwp_thread (event_child);
3072 struct process_info *proc = get_thread_process (thread);
3073
f27866ba 3074 return !proc->syscalls_to_catch.empty ();
82075af2
JS
3075}
3076
3077/* Returns 1 if GDB is interested in the event_child syscall.
3078 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3079
3080static int
3081gdb_catch_this_syscall_p (struct lwp_info *event_child)
3082{
4cc32bec 3083 int sysno;
82075af2
JS
3084 struct thread_info *thread = get_lwp_thread (event_child);
3085 struct process_info *proc = get_thread_process (thread);
3086
f27866ba 3087 if (proc->syscalls_to_catch.empty ())
82075af2
JS
3088 return 0;
3089
f27866ba 3090 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
82075af2
JS
3091 return 1;
3092
4cc32bec 3093 get_syscall_trapinfo (event_child, &sysno);
f27866ba
SM
3094
3095 for (int iter : proc->syscalls_to_catch)
82075af2
JS
3096 if (iter == sysno)
3097 return 1;
3098
3099 return 0;
3100}
3101
0d62e5e8 3102/* Wait for process, returns status. */
da6d8c04 3103
95954743
PA
3104static ptid_t
3105linux_wait_1 (ptid_t ptid,
3106 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 3107{
c12a5089 3108 client_state &cs = get_client_state ();
e5f1222d 3109 int w;
fc7238bb 3110 struct lwp_info *event_child;
bd99dc85 3111 int options;
bd99dc85 3112 int pid;
6bf5e0ba
PA
3113 int step_over_finished;
3114 int bp_explains_trap;
3115 int maybe_internal_trap;
3116 int report_to_gdb;
219f2f23 3117 int trace_event;
c2d6af84 3118 int in_step_range;
f2faf941 3119 int any_resumed;
bd99dc85 3120
87ce2a04
DE
3121 if (debug_threads)
3122 {
3123 debug_enter ();
3124 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3125 }
3126
bd99dc85
PA
3127 /* Translate generic target options into linux options. */
3128 options = __WALL;
3129 if (target_options & TARGET_WNOHANG)
3130 options |= WNOHANG;
0d62e5e8 3131
fa593d66
PA
3132 bp_explains_trap = 0;
3133 trace_event = 0;
c2d6af84 3134 in_step_range = 0;
bd99dc85
PA
3135 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3136
83e1b6c1
SM
3137 auto status_pending_p_any = [&] (thread_info *thread)
3138 {
3139 return status_pending_p_callback (thread, minus_one_ptid);
3140 };
3141
a1385b7b
SM
3142 auto not_stopped = [&] (thread_info *thread)
3143 {
3144 return not_stopped_callback (thread, minus_one_ptid);
3145 };
3146
f2faf941 3147 /* Find a resumed LWP, if any. */
83e1b6c1 3148 if (find_thread (status_pending_p_any) != NULL)
f2faf941 3149 any_resumed = 1;
a1385b7b 3150 else if (find_thread (not_stopped) != NULL)
f2faf941
PA
3151 any_resumed = 1;
3152 else
3153 any_resumed = 0;
3154
d7e15655 3155 if (step_over_bkpt == null_ptid)
6bf5e0ba
PA
3156 pid = linux_wait_for_event (ptid, &w, options);
3157 else
3158 {
3159 if (debug_threads)
87ce2a04
DE
3160 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3161 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
3162 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3163 }
3164
f2faf941 3165 if (pid == 0 || (pid == -1 && !any_resumed))
87ce2a04 3166 {
fa96cb38
PA
3167 gdb_assert (target_options & TARGET_WNOHANG);
3168
87ce2a04
DE
3169 if (debug_threads)
3170 {
fa96cb38
PA
3171 debug_printf ("linux_wait_1 ret = null_ptid, "
3172 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
3173 debug_exit ();
3174 }
fa96cb38
PA
3175
3176 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
3177 return null_ptid;
3178 }
fa96cb38
PA
3179 else if (pid == -1)
3180 {
3181 if (debug_threads)
3182 {
3183 debug_printf ("linux_wait_1 ret = null_ptid, "
3184 "TARGET_WAITKIND_NO_RESUMED\n");
3185 debug_exit ();
3186 }
bd99dc85 3187
fa96cb38
PA
3188 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3189 return null_ptid;
3190 }
0d62e5e8 3191
0bfdf32f 3192 event_child = get_thread_lwp (current_thread);
0d62e5e8 3193
fa96cb38
PA
3194 /* linux_wait_for_event only returns an exit status for the last
3195 child of a process. Report it. */
3196 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 3197 {
fa96cb38 3198 if (WIFEXITED (w))
0d62e5e8 3199 {
fa96cb38
PA
3200 ourstatus->kind = TARGET_WAITKIND_EXITED;
3201 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 3202
fa96cb38 3203 if (debug_threads)
bd99dc85 3204 {
fa96cb38
PA
3205 debug_printf ("linux_wait_1 ret = %s, exited with "
3206 "retcode %d\n",
0bfdf32f 3207 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3208 WEXITSTATUS (w));
3209 debug_exit ();
bd99dc85 3210 }
fa96cb38
PA
3211 }
3212 else
3213 {
3214 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3215 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 3216
fa96cb38
PA
3217 if (debug_threads)
3218 {
3219 debug_printf ("linux_wait_1 ret = %s, terminated with "
3220 "signal %d\n",
0bfdf32f 3221 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
3222 WTERMSIG (w));
3223 debug_exit ();
3224 }
0d62e5e8 3225 }
fa96cb38 3226
65706a29
PA
3227 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3228 return filter_exit_event (event_child, ourstatus);
3229
0bfdf32f 3230 return ptid_of (current_thread);
da6d8c04
DJ
3231 }
3232
2d97cd35
AT
3233 /* If step-over executes a breakpoint instruction, in the case of a
3234 hardware single step it means a gdb/gdbserver breakpoint had been
3235 planted on top of a permanent breakpoint, in the case of a software
3236 single step it may just mean that gdbserver hit the reinsert breakpoint.
e7ad2f14 3237 The PC has been adjusted by save_stop_reason to point at
2d97cd35
AT
3238 the breakpoint address.
3239 So in the case of the hardware single step advance the PC manually
3240 past the breakpoint and in the case of software single step advance only
3b9a79ef 3241 if it's not the single_step_breakpoint we are hitting.
2d97cd35
AT
3242 This avoids that a program would keep trapping a permanent breakpoint
3243 forever. */
d7e15655 3244 if (step_over_bkpt != null_ptid
2d97cd35
AT
3245 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3246 && (event_child->stepping
3b9a79ef 3247 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
8090aef2 3248 {
dd373349
AT
3249 int increment_pc = 0;
3250 int breakpoint_kind = 0;
3251 CORE_ADDR stop_pc = event_child->stop_pc;
3252
769ef81f
AT
3253 breakpoint_kind =
3254 the_target->breakpoint_kind_from_current_state (&stop_pc);
dd373349 3255 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
8090aef2
PA
3256
3257 if (debug_threads)
3258 {
3259 debug_printf ("step-over for %s executed software breakpoint\n",
3260 target_pid_to_str (ptid_of (current_thread)));
3261 }
3262
3263 if (increment_pc != 0)
3264 {
3265 struct regcache *regcache
3266 = get_thread_regcache (current_thread, 1);
3267
3268 event_child->stop_pc += increment_pc;
3269 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3270
3271 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 3272 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
3273 }
3274 }
3275
6bf5e0ba
PA
3276 /* If this event was not handled before, and is not a SIGTRAP, we
3277 report it. SIGILL and SIGSEGV are also treated as traps in case
3278 a breakpoint is inserted at the current PC. If this target does
3279 not support internal breakpoints at all, we also report the
3280 SIGTRAP without further processing; it's of no concern to us. */
3281 maybe_internal_trap
3282 = (supports_breakpoints ()
3283 && (WSTOPSIG (w) == SIGTRAP
3284 || ((WSTOPSIG (w) == SIGILL
3285 || WSTOPSIG (w) == SIGSEGV)
3286 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3287
3288 if (maybe_internal_trap)
3289 {
3290 /* Handle anything that requires bookkeeping before deciding to
3291 report the event or continue waiting. */
3292
3293 /* First check if we can explain the SIGTRAP with an internal
3294 breakpoint, or if we should possibly report the event to GDB.
3295 Do this before anything that may remove or insert a
3296 breakpoint. */
3297 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3298
3299 /* We have a SIGTRAP, possibly a step-over dance has just
3300 finished. If so, tweak the state machine accordingly,
3b9a79ef
YQ
3301 reinsert breakpoints and delete any single-step
3302 breakpoints. */
6bf5e0ba
PA
3303 step_over_finished = finish_step_over (event_child);
3304
3305 /* Now invoke the callbacks of any internal breakpoints there. */
3306 check_breakpoints (event_child->stop_pc);
3307
219f2f23
PA
3308 /* Handle tracepoint data collecting. This may overflow the
3309 trace buffer, and cause a tracing stop, removing
3310 breakpoints. */
3311 trace_event = handle_tracepoints (event_child);
3312
6bf5e0ba
PA
3313 if (bp_explains_trap)
3314 {
6bf5e0ba 3315 if (debug_threads)
87ce2a04 3316 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba
PA
3317 }
3318 }
3319 else
3320 {
3321 /* We have some other signal, possibly a step-over dance was in
3322 progress, and it should be cancelled too. */
3323 step_over_finished = finish_step_over (event_child);
fa593d66
PA
3324 }
3325
3326 /* We have all the data we need. Either report the event to GDB, or
3327 resume threads and keep waiting for more. */
3328
3329 /* If we're collecting a fast tracepoint, finish the collection and
3330 move out of the jump pad before delivering a signal. See
3331 linux_stabilize_threads. */
3332
3333 if (WIFSTOPPED (w)
3334 && WSTOPSIG (w) != SIGTRAP
3335 && supports_fast_tracepoints ()
58b4daa5 3336 && agent_loaded_p ())
fa593d66
PA
3337 {
3338 if (debug_threads)
87ce2a04
DE
3339 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3340 "to defer or adjust it.\n",
0bfdf32f 3341 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3342
3343 /* Allow debugging the jump pad itself. */
0bfdf32f 3344 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
3345 && maybe_move_out_of_jump_pad (event_child, &w))
3346 {
3347 enqueue_one_deferred_signal (event_child, &w);
3348
3349 if (debug_threads)
87ce2a04 3350 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 3351 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
3352
3353 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be 3354
edeeb602
YQ
3355 if (debug_threads)
3356 debug_exit ();
582511be 3357 return ignore_event (ourstatus);
fa593d66
PA
3358 }
3359 }
219f2f23 3360
229d26fc
SM
3361 if (event_child->collecting_fast_tracepoint
3362 != fast_tpoint_collect_result::not_collecting)
fa593d66
PA
3363 {
3364 if (debug_threads)
87ce2a04
DE
3365 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3366 "Check if we're already there.\n",
0bfdf32f 3367 lwpid_of (current_thread),
229d26fc 3368 (int) event_child->collecting_fast_tracepoint);
fa593d66
PA
3369
3370 trace_event = 1;
3371
3372 event_child->collecting_fast_tracepoint
3373 = linux_fast_tracepoint_collecting (event_child, NULL);
3374
229d26fc
SM
3375 if (event_child->collecting_fast_tracepoint
3376 != fast_tpoint_collect_result::before_insn)
fa593d66
PA
3377 {
3378 /* No longer need this breakpoint. */
3379 if (event_child->exit_jump_pad_bkpt != NULL)
3380 {
3381 if (debug_threads)
87ce2a04
DE
3382 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3383 "stopping all threads momentarily.\n");
fa593d66
PA
3384
3385 /* Other running threads could hit this breakpoint.
3386 We don't handle moribund locations like GDB does,
3387 instead we always pause all threads when removing
3388 breakpoints, so that any step-over or
3389 decr_pc_after_break adjustment is always taken
3390 care of while the breakpoint is still
3391 inserted. */
3392 stop_all_lwps (1, event_child);
fa593d66
PA
3393
3394 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3395 event_child->exit_jump_pad_bkpt = NULL;
3396
3397 unstop_all_lwps (1, event_child);
3398
3399 gdb_assert (event_child->suspended >= 0);
3400 }
3401 }
3402
229d26fc
SM
3403 if (event_child->collecting_fast_tracepoint
3404 == fast_tpoint_collect_result::not_collecting)
fa593d66
PA
3405 {
3406 if (debug_threads)
87ce2a04
DE
3407 debug_printf ("fast tracepoint finished "
3408 "collecting successfully.\n");
fa593d66
PA
3409
3410 /* We may have a deferred signal to report. */
3411 if (dequeue_one_deferred_signal (event_child, &w))
3412 {
3413 if (debug_threads)
87ce2a04 3414 debug_printf ("dequeued one signal.\n");
fa593d66 3415 }
3c11dd79 3416 else
fa593d66 3417 {
3c11dd79 3418 if (debug_threads)
87ce2a04 3419 debug_printf ("no deferred signals.\n");
fa593d66
PA
3420
3421 if (stabilizing_threads)
3422 {
3423 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3424 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3425
3426 if (debug_threads)
3427 {
3428 debug_printf ("linux_wait_1 ret = %s, stopped "
3429 "while stabilizing threads\n",
0bfdf32f 3430 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3431 debug_exit ();
3432 }
3433
0bfdf32f 3434 return ptid_of (current_thread);
fa593d66
PA
3435 }
3436 }
3437 }
6bf5e0ba
PA
3438 }
3439
e471f25b
PA
3440 /* Check whether GDB would be interested in this event. */
3441
82075af2
JS
3442 /* Check if GDB is interested in this syscall. */
3443 if (WIFSTOPPED (w)
3444 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3445 && !gdb_catch_this_syscall_p (event_child))
3446 {
3447 if (debug_threads)
3448 {
3449 debug_printf ("Ignored syscall for LWP %ld.\n",
3450 lwpid_of (current_thread));
3451 }
3452
3453 linux_resume_one_lwp (event_child, event_child->stepping,
3454 0, NULL);
edeeb602
YQ
3455
3456 if (debug_threads)
3457 debug_exit ();
82075af2
JS
3458 return ignore_event (ourstatus);
3459 }
3460
e471f25b
PA
3461 /* If GDB is not interested in this signal, don't stop other
3462 threads, and don't report it to GDB. Just resume the inferior
3463 right away. We do this for threading-related signals as well as
3464 any that GDB specifically requested we ignore. But never ignore
3465 SIGSTOP if we sent it ourselves, and do not ignore signals when
3466 stepping - they may require special handling to skip the signal
c9587f88
AT
3467 handler. Also never ignore signals that could be caused by a
3468 breakpoint. */
e471f25b 3469 if (WIFSTOPPED (w)
0bfdf32f 3470 && current_thread->last_resume_kind != resume_step
e471f25b 3471 && (
1a981360 3472#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3473 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3474 && (WSTOPSIG (w) == __SIGRTMIN
3475 || WSTOPSIG (w) == __SIGRTMIN + 1))
3476 ||
3477#endif
c12a5089 3478 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3479 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3480 && current_thread->last_resume_kind == resume_stop)
3481 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3482 {
3483 siginfo_t info, *info_p;
3484
3485 if (debug_threads)
87ce2a04 3486 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3487 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3488
0bfdf32f 3489 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3490 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3491 info_p = &info;
3492 else
3493 info_p = NULL;
863d01bd
PA
3494
3495 if (step_over_finished)
3496 {
3497 /* We cancelled this thread's step-over above. We still
3498 need to unsuspend all other LWPs, and set them back
3499 running again while the signal handler runs. */
3500 unsuspend_all_lwps (event_child);
3501
3502 /* Enqueue the pending signal info so that proceed_all_lwps
3503 doesn't lose it. */
3504 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3505
3506 proceed_all_lwps ();
3507 }
3508 else
3509 {
3510 linux_resume_one_lwp (event_child, event_child->stepping,
3511 WSTOPSIG (w), info_p);
3512 }
edeeb602
YQ
3513
3514 if (debug_threads)
3515 debug_exit ();
3516
582511be 3517 return ignore_event (ourstatus);
e471f25b
PA
3518 }
3519
c2d6af84
PA
3520 /* Note that all addresses are always "out of the step range" when
3521 there's no range to begin with. */
3522 in_step_range = lwp_in_step_range (event_child);
3523
3524 /* If GDB wanted this thread to single step, and the thread is out
3525 of the step range, we always want to report the SIGTRAP, and let
3526 GDB handle it. Watchpoints should always be reported. So should
3527 signals we can't explain. A SIGTRAP we can't explain could be a
3528 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3529 do, we're be able to handle GDB breakpoints on top of internal
3530 breakpoints, by handling the internal breakpoint and still
3531 reporting the event to GDB. If we don't, we're out of luck, GDB
863d01bd
PA
3532 won't see the breakpoint hit. If we see a single-step event but
3533 the thread should be continuing, don't pass the trap to gdb.
3534 That indicates that we had previously finished a single-step but
3535 left the single-step pending -- see
3536 complete_ongoing_step_over. */
6bf5e0ba 3537 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3538 || (current_thread->last_resume_kind == resume_step
c2d6af84 3539 && !in_step_range)
15c66dd6 3540 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
863d01bd
PA
3541 || (!in_step_range
3542 && !bp_explains_trap
3543 && !trace_event
3544 && !step_over_finished
3545 && !(current_thread->last_resume_kind == resume_continue
3546 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
9f3a5c85 3547 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3548 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e 3549 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
00db26fa 3550 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
d3ce09f5
SS
3551
3552 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3553
3554 /* We found no reason GDB would want us to stop. We either hit one
3555 of our own breakpoints, or finished an internal step GDB
3556 shouldn't know about. */
3557 if (!report_to_gdb)
3558 {
3559 if (debug_threads)
3560 {
3561 if (bp_explains_trap)
87ce2a04 3562 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3563 if (step_over_finished)
87ce2a04 3564 debug_printf ("Step-over finished.\n");
219f2f23 3565 if (trace_event)
87ce2a04 3566 debug_printf ("Tracepoint event.\n");
c2d6af84 3567 if (lwp_in_step_range (event_child))
87ce2a04
DE
3568 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3569 paddress (event_child->stop_pc),
3570 paddress (event_child->step_range_start),
3571 paddress (event_child->step_range_end));
6bf5e0ba
PA
3572 }
3573
3574 /* We're not reporting this breakpoint to GDB, so apply the
3575 decr_pc_after_break adjustment to the inferior's regcache
3576 ourselves. */
3577
3578 if (the_low_target.set_pc != NULL)
3579 {
3580 struct regcache *regcache
0bfdf32f 3581 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3582 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3583 }
3584
7984d532 3585 if (step_over_finished)
e3652c84
YQ
3586 {
3587 /* If we have finished stepping over a breakpoint, we've
3588 stopped and suspended all LWPs momentarily except the
3589 stepping one. This is where we resume them all again.
3590 We're going to keep waiting, so use proceed, which
3591 handles stepping over the next breakpoint. */
3592 unsuspend_all_lwps (event_child);
3593 }
3594 else
3595 {
3596 /* Remove the single-step breakpoints if any. Note that
3597 there isn't single-step breakpoint if we finished stepping
3598 over. */
3599 if (can_software_single_step ()
3600 && has_single_step_breakpoints (current_thread))
3601 {
3602 stop_all_lwps (0, event_child);
3603 delete_single_step_breakpoints (current_thread);
3604 unstop_all_lwps (0, event_child);
3605 }
3606 }
7984d532 3607
e3652c84
YQ
3608 if (debug_threads)
3609 debug_printf ("proceeding all threads.\n");
6bf5e0ba 3610 proceed_all_lwps ();
edeeb602
YQ
3611
3612 if (debug_threads)
3613 debug_exit ();
3614
582511be 3615 return ignore_event (ourstatus);
6bf5e0ba
PA
3616 }
3617
3618 if (debug_threads)
3619 {
00db26fa 3620 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
ad071a30 3621 {
23fdd69e
SM
3622 std::string str
3623 = target_waitstatus_to_string (&event_child->waitstatus);
ad071a30 3624
ad071a30 3625 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
23fdd69e 3626 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
ad071a30 3627 }
0bfdf32f 3628 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3629 {
3630 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3631 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3632 else if (!lwp_in_step_range (event_child))
87ce2a04 3633 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3634 }
15c66dd6 3635 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3636 debug_printf ("Stopped by watchpoint.\n");
582511be 3637 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3638 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3639 if (debug_threads)
87ce2a04 3640 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3641 }
3642
3643 /* Alright, we're going to report a stop. */
3644
3b9a79ef 3645 /* Remove single-step breakpoints. */
8901d193
YQ
3646 if (can_software_single_step ())
3647 {
3b9a79ef 3648 /* Remove single-step breakpoints or not. It it is true, stop all
8901d193
YQ
3649 lwps, so that other threads won't hit the breakpoint in the
3650 staled memory. */
3b9a79ef 3651 int remove_single_step_breakpoints_p = 0;
8901d193
YQ
3652
3653 if (non_stop)
3654 {
3b9a79ef
YQ
3655 remove_single_step_breakpoints_p
3656 = has_single_step_breakpoints (current_thread);
8901d193
YQ
3657 }
3658 else
3659 {
3660 /* In all-stop, a stop reply cancels all previous resume
3b9a79ef 3661 requests. Delete all single-step breakpoints. */
8901d193 3662
9c80ecd6
SM
3663 find_thread ([&] (thread_info *thread) {
3664 if (has_single_step_breakpoints (thread))
3665 {
3666 remove_single_step_breakpoints_p = 1;
3667 return true;
3668 }
8901d193 3669
9c80ecd6
SM
3670 return false;
3671 });
8901d193
YQ
3672 }
3673
3b9a79ef 3674 if (remove_single_step_breakpoints_p)
8901d193 3675 {
3b9a79ef 3676 /* If we remove single-step breakpoints from memory, stop all lwps,
8901d193
YQ
3677 so that other threads won't hit the breakpoint in the staled
3678 memory. */
3679 stop_all_lwps (0, event_child);
3680
3681 if (non_stop)
3682 {
3b9a79ef
YQ
3683 gdb_assert (has_single_step_breakpoints (current_thread));
3684 delete_single_step_breakpoints (current_thread);
8901d193
YQ
3685 }
3686 else
3687 {
9c80ecd6
SM
3688 for_each_thread ([] (thread_info *thread){
3689 if (has_single_step_breakpoints (thread))
3690 delete_single_step_breakpoints (thread);
3691 });
8901d193
YQ
3692 }
3693
3694 unstop_all_lwps (0, event_child);
3695 }
3696 }
3697
582511be 3698 if (!stabilizing_threads)
6bf5e0ba
PA
3699 {
3700 /* In all-stop, stop all threads. */
582511be
PA
3701 if (!non_stop)
3702 stop_all_lwps (0, NULL);
6bf5e0ba 3703
c03e6ccc 3704 if (step_over_finished)
582511be
PA
3705 {
3706 if (!non_stop)
3707 {
3708 /* If we were doing a step-over, all other threads but
3709 the stepping one had been paused in start_step_over,
3710 with their suspend counts incremented. We don't want
3711 to do a full unstop/unpause, because we're in
3712 all-stop mode (so we want threads stopped), but we
3713 still need to unsuspend the other threads, to
3714 decrement their `suspended' count back. */
3715 unsuspend_all_lwps (event_child);
3716 }
3717 else
3718 {
3719 /* If we just finished a step-over, then all threads had
3720 been momentarily paused. In all-stop, that's fine,
3721 we want threads stopped by now anyway. In non-stop,
3722 we need to re-resume threads that GDB wanted to be
3723 running. */
3724 unstop_all_lwps (1, event_child);
3725 }
3726 }
c03e6ccc 3727
3aa5cfa0
AT
3728 /* If we're not waiting for a specific LWP, choose an event LWP
3729 from among those that have had events. Giving equal priority
3730 to all LWPs that have had events helps prevent
3731 starvation. */
d7e15655 3732 if (ptid == minus_one_ptid)
3aa5cfa0
AT
3733 {
3734 event_child->status_pending_p = 1;
3735 event_child->status_pending = w;
3736
3737 select_event_lwp (&event_child);
3738
3739 /* current_thread and event_child must stay in sync. */
3740 current_thread = get_lwp_thread (event_child);
3741
3742 event_child->status_pending_p = 0;
3743 w = event_child->status_pending;
3744 }
3745
3746
fa593d66 3747 /* Stabilize threads (move out of jump pads). */
582511be
PA
3748 if (!non_stop)
3749 stabilize_threads ();
6bf5e0ba
PA
3750 }
3751 else
3752 {
3753 /* If we just finished a step-over, then all threads had been
3754 momentarily paused. In all-stop, that's fine, we want
3755 threads stopped by now anyway. In non-stop, we need to
3756 re-resume threads that GDB wanted to be running. */
3757 if (step_over_finished)
7984d532 3758 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3759 }
3760
00db26fa 3761 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
de0d863e 3762 {
00db26fa
PA
3763 /* If the reported event is an exit, fork, vfork or exec, let
3764 GDB know. */
5a04c4cf
PA
3765
3766 /* Break the unreported fork relationship chain. */
3767 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3768 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3769 {
3770 event_child->fork_relative->fork_relative = NULL;
3771 event_child->fork_relative = NULL;
3772 }
3773
00db26fa 3774 *ourstatus = event_child->waitstatus;
de0d863e
DB
3775 /* Clear the event lwp's waitstatus since we handled it already. */
3776 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3777 }
3778 else
3779 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3780
582511be 3781 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3782 it was a software breakpoint, and the client doesn't know we can
3783 adjust the breakpoint ourselves. */
3784 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
c12a5089 3785 && !cs.swbreak_feature)
582511be
PA
3786 {
3787 int decr_pc = the_low_target.decr_pc_after_break;
3788
3789 if (decr_pc != 0)
3790 {
3791 struct regcache *regcache
3792 = get_thread_regcache (current_thread, 1);
3793 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3794 }
3795 }
3796
82075af2
JS
3797 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3798 {
82075af2 3799 get_syscall_trapinfo (event_child,
4cc32bec 3800 &ourstatus->value.syscall_number);
82075af2
JS
3801 ourstatus->kind = event_child->syscall_state;
3802 }
3803 else if (current_thread->last_resume_kind == resume_stop
3804 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3805 {
3806 /* A thread that has been requested to stop by GDB with vCont;t,
3807 and it stopped cleanly, so report as SIG0. The use of
3808 SIGSTOP is an implementation detail. */
a493e3e2 3809 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3810 }
0bfdf32f 3811 else if (current_thread->last_resume_kind == resume_stop
8336d594 3812 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3813 {
3814 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3815 but, it stopped for other reasons. */
2ea28649 3816 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3817 }
de0d863e 3818 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3819 {
2ea28649 3820 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3821 }
3822
d7e15655 3823 gdb_assert (step_over_bkpt == null_ptid);
d50171e4 3824
bd99dc85 3825 if (debug_threads)
87ce2a04
DE
3826 {
3827 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3828 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3829 ourstatus->kind, ourstatus->value.sig);
3830 debug_exit ();
3831 }
bd99dc85 3832
65706a29
PA
3833 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3834 return filter_exit_event (event_child, ourstatus);
3835
0bfdf32f 3836 return ptid_of (current_thread);
bd99dc85
PA
3837}
3838
3839/* Get rid of any pending event in the pipe. */
3840static void
3841async_file_flush (void)
3842{
3843 int ret;
3844 char buf;
3845
3846 do
3847 ret = read (linux_event_pipe[0], &buf, 1);
3848 while (ret >= 0 || (ret == -1 && errno == EINTR));
3849}
3850
3851/* Put something in the pipe, so the event loop wakes up. */
3852static void
3853async_file_mark (void)
3854{
3855 int ret;
3856
3857 async_file_flush ();
3858
3859 do
3860 ret = write (linux_event_pipe[1], "+", 1);
3861 while (ret == 0 || (ret == -1 && errno == EINTR));
3862
3863 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3864 be awakened anyway. */
3865}
3866
95954743
PA
3867static ptid_t
3868linux_wait (ptid_t ptid,
3869 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3870{
95954743 3871 ptid_t event_ptid;
bd99dc85 3872
bd99dc85
PA
3873 /* Flush the async file first. */
3874 if (target_is_async_p ())
3875 async_file_flush ();
3876
582511be
PA
3877 do
3878 {
3879 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3880 }
3881 while ((target_options & TARGET_WNOHANG) == 0
d7e15655 3882 && event_ptid == null_ptid
582511be 3883 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3884
3885 /* If at least one stop was reported, there may be more. A single
3886 SIGCHLD can signal more than one child stop. */
3887 if (target_is_async_p ()
3888 && (target_options & TARGET_WNOHANG) != 0
d7e15655 3889 && event_ptid != null_ptid)
bd99dc85
PA
3890 async_file_mark ();
3891
3892 return event_ptid;
da6d8c04
DJ
3893}
3894
c5f62d5f 3895/* Send a signal to an LWP. */
fd500816
DJ
3896
3897static int
a1928bad 3898kill_lwp (unsigned long lwpid, int signo)
fd500816 3899{
4a6ed09b 3900 int ret;
fd500816 3901
4a6ed09b
PA
3902 errno = 0;
3903 ret = syscall (__NR_tkill, lwpid, signo);
3904 if (errno == ENOSYS)
3905 {
3906 /* If tkill fails, then we are not using nptl threads, a
3907 configuration we no longer support. */
3908 perror_with_name (("tkill"));
3909 }
3910 return ret;
fd500816
DJ
3911}
3912
964e4306
PA
/* Request LWP to stop, by queueing a SIGSTOP for it.  Exported hook
   called from the shared linux-nat code.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3918
0d62e5e8 3919static void
02fc4de7 3920send_sigstop (struct lwp_info *lwp)
0d62e5e8 3921{
bd99dc85 3922 int pid;
0d62e5e8 3923
d86d4aaf 3924 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3925
0d62e5e8
DJ
3926 /* If we already have a pending stop signal for this process, don't
3927 send another. */
54a0b537 3928 if (lwp->stop_expected)
0d62e5e8 3929 {
ae13219e 3930 if (debug_threads)
87ce2a04 3931 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3932
0d62e5e8
DJ
3933 return;
3934 }
3935
3936 if (debug_threads)
87ce2a04 3937 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3938
d50171e4 3939 lwp->stop_expected = 1;
bd99dc85 3940 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3941}
3942
df3e4dbe
SM
3943static void
3944send_sigstop (thread_info *thread, lwp_info *except)
02fc4de7 3945{
d86d4aaf 3946 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3947
7984d532
PA
3948 /* Ignore EXCEPT. */
3949 if (lwp == except)
df3e4dbe 3950 return;
7984d532 3951
02fc4de7 3952 if (lwp->stopped)
df3e4dbe 3953 return;
02fc4de7
PA
3954
3955 send_sigstop (lwp);
7984d532
PA
3956}
3957
3958/* Increment the suspend count of an LWP, and stop it, if not stopped
3959 yet. */
df3e4dbe
SM
3960static void
3961suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
7984d532 3962{
d86d4aaf 3963 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3964
3965 /* Ignore EXCEPT. */
3966 if (lwp == except)
df3e4dbe 3967 return;
7984d532 3968
863d01bd 3969 lwp_suspended_inc (lwp);
7984d532 3970
df3e4dbe 3971 send_sigstop (thread, except);
02fc4de7
PA
3972}
3973
95954743
PA
3974static void
3975mark_lwp_dead (struct lwp_info *lwp, int wstat)
3976{
95954743
PA
3977 /* Store the exit status for later. */
3978 lwp->status_pending_p = 1;
3979 lwp->status_pending = wstat;
3980
00db26fa
PA
3981 /* Store in waitstatus as well, as there's nothing else to process
3982 for this event. */
3983 if (WIFEXITED (wstat))
3984 {
3985 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3986 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3987 }
3988 else if (WIFSIGNALED (wstat))
3989 {
3990 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3991 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3992 }
3993
95954743
PA
3994 /* Prevent trying to stop it. */
3995 lwp->stopped = 1;
3996
3997 /* No further stops are expected from a dead lwp. */
3998 lwp->stop_expected = 0;
3999}
4000
00db26fa
PA
4001/* Return true if LWP has exited already, and has a pending exit event
4002 to report to GDB. */
4003
4004static int
4005lwp_is_marked_dead (struct lwp_info *lwp)
4006{
4007 return (lwp->status_pending_p
4008 && (WIFEXITED (lwp->status_pending)
4009 || WIFSIGNALED (lwp->status_pending)));
4010}
4011
/* Wait for all children to stop for the SIGSTOPs we just queued.  */

static void
wait_for_sigstop (void)
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  /* Remember which thread was current; pulling events below may make
     it die under us.  */
  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
				       &wstat, __WALL);
  gdb_assert (ret == -1);

  /* Restore the previously current thread, but only if it survived.  */
  if (saved_thread == NULL || linux_thread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      current_thread = NULL;
    }
}
4051
fcb056a5 4052/* Returns true if THREAD is stopped in a jump pad, and we can't
fa593d66
PA
4053 move it out, because we need to report the stop event to GDB. For
4054 example, if the user puts a breakpoint in the jump pad, it's
4055 because she wants to debug it. */
4056
fcb056a5
SM
4057static bool
4058stuck_in_jump_pad_callback (thread_info *thread)
fa593d66 4059{
d86d4aaf 4060 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4061
863d01bd
PA
4062 if (lwp->suspended != 0)
4063 {
4064 internal_error (__FILE__, __LINE__,
4065 "LWP %ld is suspended, suspended=%d\n",
4066 lwpid_of (thread), lwp->suspended);
4067 }
fa593d66
PA
4068 gdb_assert (lwp->stopped);
4069
4070 /* Allow debugging the jump pad, gdb_collect, etc.. */
4071 return (supports_fast_tracepoints ()
58b4daa5 4072 && agent_loaded_p ()
fa593d66 4073 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 4074 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66 4075 || thread->last_resume_kind == resume_step)
229d26fc
SM
4076 && (linux_fast_tracepoint_collecting (lwp, NULL)
4077 != fast_tpoint_collect_result::not_collecting));
fa593d66
PA
4078}
4079
/* for_each_thread callback: if THREAD is stopped inside a fast
   tracepoint jump pad and GDB doesn't need to see this stop, defer
   any pending signal and set the LWP running again so it can exit the
   pad; otherwise, keep it suspended.  */
static void
move_out_of_jump_pad_callback (thread_info *thread)
{
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      /* Park the pending signal on the deferred queue; it will be
	 re-delivered once the LWP is out of the pad.  */
      if (wstat)
	{
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}
4129
5a6b0a41
SM
4130static bool
4131lwp_running (thread_info *thread)
fa593d66 4132{
d86d4aaf 4133 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66 4134
00db26fa 4135 if (lwp_is_marked_dead (lwp))
5a6b0a41
SM
4136 return false;
4137
4138 return !lwp->stopped;
fa593d66
PA
4139}
4140
/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  /* Announce the stop-in-progress state before queueing SIGSTOPs, so
     event handling code elsewhere knows these stops are ours.  */
  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  /* Block until every LWP has reported its stop.  */
  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}
4186
863d01bd
PA
4187/* Enqueue one signal in the chain of signals which need to be
4188 delivered to this process on next resume. */
4189
4190static void
4191enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4192{
8d749320 4193 struct pending_signals *p_sig = XNEW (struct pending_signals);
863d01bd 4194
863d01bd
PA
4195 p_sig->prev = lwp->pending_signals;
4196 p_sig->signal = signal;
4197 if (info == NULL)
4198 memset (&p_sig->info, 0, sizeof (siginfo_t));
4199 else
4200 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4201 lwp->pending_signals = p_sig;
4202}
4203
fa5308bd
AT
4204/* Install breakpoints for software single stepping. */
4205
4206static void
4207install_software_single_step_breakpoints (struct lwp_info *lwp)
4208{
984a2c04
YQ
4209 struct thread_info *thread = get_lwp_thread (lwp);
4210 struct regcache *regcache = get_thread_regcache (thread, 1);
8ce47547
TT
4211
4212 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
984a2c04 4213
984a2c04 4214 current_thread = thread;
a0ff9e1a 4215 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
fa5308bd 4216
a0ff9e1a 4217 for (CORE_ADDR pc : next_pcs)
3b9a79ef 4218 set_single_step_breakpoint (pc, current_ptid);
fa5308bd
AT
4219}
4220
7fe5e27e
AT
4221/* Single step via hardware or software single step.
4222 Return 1 if hardware single stepping, 0 if software single stepping
4223 or can't single step. */
4224
4225static int
4226single_step (struct lwp_info* lwp)
4227{
4228 int step = 0;
4229
4230 if (can_hardware_single_step ())
4231 {
4232 step = 1;
4233 }
4234 else if (can_software_single_step ())
4235 {
4236 install_software_single_step_breakpoints (lwp);
4237 step = 0;
4238 }
4239 else
4240 {
4241 if (debug_threads)
4242 debug_printf ("stepping is not implemented on this target");
4243 }
4244
4245 return step;
4246}
4247
35ac8b3e 4248/* The signal can be delivered to the inferior if we are not trying to
5b061e98
YQ
4249 finish a fast tracepoint collect. Since signal can be delivered in
4250 the step-over, the program may go to signal handler and trap again
4251 after return from the signal handler. We can live with the spurious
4252 double traps. */
35ac8b3e
YQ
4253
4254static int
4255lwp_signal_can_be_delivered (struct lwp_info *lwp)
4256{
229d26fc
SM
4257 return (lwp->collecting_fast_tracepoint
4258 == fast_tpoint_collect_result::not_collecting);
35ac8b3e
YQ
4259}
4260
/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, give it that signal.  May throw on ptrace
   failure; callers that want the error filtered use
   linux_resume_one_lwp instead.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lwp,
			    int step, int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  /* Nothing to do for an LWP that is already running.  */
  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (can_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  /* Record the PC we are resuming from, when registers are
     available (see the --wrapper note above).  */
  if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = (*the_low_target.get_pc) (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  The list is newest-first, so walk to the tail to
     deliver the oldest queued signal.  */
  if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
4469
4470/* Called when we try to resume a stopped LWP and that errors out. If
4471 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4472 or about to become), discard the error, clear any pending status
4473 the LWP may have, and return true (we'll collect the exit status
4474 soon enough). Otherwise, return false. */
4475
4476static int
4477check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4478{
4479 struct thread_info *thread = get_lwp_thread (lp);
4480
4481 /* If we get an error after resuming the LWP successfully, we'd
4482 confuse !T state for the LWP being gone. */
4483 gdb_assert (lp->stopped);
4484
4485 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4486 because even if ptrace failed with ESRCH, the tracee may be "not
4487 yet fully dead", but already refusing ptrace requests. In that
4488 case the tracee has 'R (Running)' state for a little bit
4489 (observed in Linux 3.18). See also the note on ESRCH in the
4490 ptrace(2) man page. Instead, check whether the LWP has any state
4491 other than ptrace-stopped. */
4492
4493 /* Don't assume anything if /proc/PID/status can't be read. */
4494 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 4495 {
23f238d3
PA
4496 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4497 lp->status_pending_p = 0;
4498 return 1;
4499 }
4500 return 0;
4501}
4502
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  try
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Swallow the error if the LWP is simply gone (zombie/exiting);
	 its exit status will be collected shortly.  Otherwise
	 re-throw to the caller.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
}
4520
/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      /* Does this resume element apply to THREAD?  */
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  /* Ignore stop requests for a thread GDB already asked to
	     stop.  */
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_resume_kind
			       == resume_step)
			      ? "stepping"
			      : "continuing",
			      lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork children that GDB
	     does not yet know are new fork children.  */
	  if (lwp->fork_relative != NULL)
	    {
	      struct lwp_info *rel = lwp->fork_relative;

	      if (rel->status_pending_p
		  && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
		      || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
		{
		  if (debug_threads)
		    debug_printf ("not resuming LWP %ld: has queued stop reply\n",
				  lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      if (debug_threads)
		debug_printf ("not resuming LWP %ld: has queued stop reply\n",
			      lwpid_of (thread));
	      continue;
	    }

	  /* This element applies: record it on the LWP.  */
	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}
4632
8f86d7aa
SM
4633/* find_thread callback for linux_resume. Return true if this lwp has an
4634 interesting status pending. */
5544ad89 4635
25c28b4d
SM
4636static bool
4637resume_status_pending_p (thread_info *thread)
5544ad89 4638{
d86d4aaf 4639 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 4640
bd99dc85
PA
4641 /* LWPs which will not be resumed are not interesting, because
4642 we might not wait for them next time through linux_wait. */
2bd7c093 4643 if (lwp->resume == NULL)
25c28b4d 4644 return false;
64386c31 4645
25c28b4d 4646 return thread_still_has_status_pending_p (thread);
d50171e4
PA
4647}
4648
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.

   Used as a find_thread callback; the first thread for which this
   returns true terminates the search.  */

static bool
need_step_over_p (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     don't have to do step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  /* A thread the client wants stopped must stay stopped; no step-over.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On software single step target, resume the inferior with signal
     rather than stepping over.  */
  if (can_software_single_step ()
      && lwp->pending_signals != NULL
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  /* The breakpoint queries below operate on the current thread, so
     temporarily switch to THREAD and restore on every exit path.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}
4784
d50171e4
PA
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped by either hardware or software while the breakpoint
   is temporarily uninserted from the inferior.  When the single-step
   finishes, we reinsert the breakpoint, and let all threads that are
   supposed to be running, run again.

   Returns 1 (the LWP was resumed for the step-over; the matching
   finish_step_over happens when the step completes).  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  stop_all_lwps (1, lwp);

  /* LWP itself must not have been suspended by the stop above.  */
  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint bookkeeping below acts on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
  return 1;
}
4842
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any single-step
   breakpoints we've set, on non hardware single-step targets.

   Returns 1 if a step-over was actually in progress for LWP (i.e.,
   bp_reinsert was set), 0 otherwise.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return 1;
    }
  else
    return 0;
}
4884
863d01bd
PA
/* If there's a step over in progress, wait until all threads stop
   (that is, until the stepping thread finishes its step), and
   unsuspend all lwps.  The stepping thread ends with its status
   pending, which is processed later when we get back to processing
   events.  */

static void
complete_ongoing_step_over (void)
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
					   &wstat, __WALL);
      gdb_assert (ret == -1);

      /* The stepping LWP may already be gone (e.g., it exited), in
	 which case there is no step-over to finish, but the global
	 state must still be reset.  */
      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	finish_step_over (lwp);
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}
4917
5544ad89
DJ
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static void
linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  /* No resume request for this thread; nothing to do.  */
  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we do this decision
     based on if *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}
5034
/* Resume (or stop) threads according to GDB's N resume requests in
   RESUME_INFO.  Records each thread's request, then decides whether
   to actually resume: if (in all-stop) some to-be-resumed thread has
   a pending status, or some thread needs a step-over, all threads are
   left stopped (signals are still queued).  Otherwise, each thread is
   resumed per its request.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread (resume_status_pending_p) != NULL;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over = find_thread (need_step_over_p);

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      linux_resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}
5104
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static void
proceed_one_lwp (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT is the one LWP the caller wants left alone (may be NULL).  */
  if (lwp == except)
    return;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting))
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop.  "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (can_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  linux_resume_one_lwp (lwp, step, 0, NULL);
}
5210
e2b44075
SM
5211static void
5212unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
7984d532 5213{
d86d4aaf 5214 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
5215
5216 if (lwp == except)
e2b44075 5217 return;
7984d532 5218
863d01bd 5219 lwp_suspended_decr (lwp);
7984d532 5220
e2b44075 5221 proceed_one_lwp (thread, except);
d50171e4
PA
5222}
5223
/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over = find_thread (need_step_over_p);

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  for_each_thread ([] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}
5262
/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  If UNSUSPEND is nonzero,
   each LWP's suspend count is also decremented (matching a
   stop_all_lwps that suspended).  */

static void
unstop_all_lwps (int unsuspend, struct lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
	debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
		      lwpid_of (get_lwp_thread (except)));
      else
	debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}
5297
58caa3dc
DJ
5298
5299#ifdef HAVE_LINUX_REGSETS
5300
1faeff08
MR
5301#define use_linux_regsets 1
5302
030031ee
PA
5303/* Returns true if REGSET has been disabled. */
5304
5305static int
5306regset_disabled (struct regsets_info *info, struct regset_info *regset)
5307{
5308 return (info->disabled_regsets != NULL
5309 && info->disabled_regsets[regset - info->regsets]);
5310}
5311
5312/* Disable REGSET. */
5313
5314static void
5315disable_regset (struct regsets_info *info, struct regset_info *regset)
5316{
5317 int dr_offset;
5318
5319 dr_offset = regset - info->regsets;
5320 if (info->disabled_regsets == NULL)
224c3ddb 5321 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
030031ee
PA
5322 info->disabled_regsets[dr_offset] = 1;
5323}
5324
58caa3dc 5325static int
3aee8918
PA
5326regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5327 struct regcache *regcache)
58caa3dc
DJ
5328{
5329 struct regset_info *regset;
e9d25b98 5330 int saw_general_regs = 0;
95954743 5331 int pid;
1570b33e 5332 struct iovec iov;
58caa3dc 5333
0bfdf32f 5334 pid = lwpid_of (current_thread);
28eef672 5335 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
58caa3dc 5336 {
1570b33e
L
5337 void *buf, *data;
5338 int nt_type, res;
58caa3dc 5339
030031ee 5340 if (regset->size == 0 || regset_disabled (regsets_info, regset))
28eef672 5341 continue;
58caa3dc 5342
bca929d3 5343 buf = xmalloc (regset->size);
1570b33e
L
5344
5345 nt_type = regset->nt_type;
5346 if (nt_type)
5347 {
5348 iov.iov_base = buf;
5349 iov.iov_len = regset->size;
5350 data = (void *) &iov;
5351 }
5352 else
5353 data = buf;
5354
dfb64f85 5355#ifndef __sparc__
f15f9948 5356 res = ptrace (regset->get_request, pid,
b8e1b30e 5357 (PTRACE_TYPE_ARG3) (long) nt_type, data);
dfb64f85 5358#else
1570b33e 5359 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 5360#endif
58caa3dc
DJ
5361 if (res < 0)
5362 {
1ef53e6b
AH
5363 if (errno == EIO
5364 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
58caa3dc 5365 {
1ef53e6b
AH
5366 /* If we get EIO on a regset, or an EINVAL and the regset is
5367 optional, do not try it again for this process mode. */
030031ee 5368 disable_regset (regsets_info, regset);
58caa3dc 5369 }
e5a9158d
AA
5370 else if (errno == ENODATA)
5371 {
5372 /* ENODATA may be returned if the regset is currently
5373 not "active". This can happen in normal operation,
5374 so suppress the warning in this case. */
5375 }
fcd4a73d
YQ
5376 else if (errno == ESRCH)
5377 {
5378 /* At this point, ESRCH should mean the process is
5379 already gone, in which case we simply ignore attempts
5380 to read its registers. */
5381 }
58caa3dc
DJ
5382 else
5383 {
0d62e5e8 5384 char s[256];
95954743
PA
5385 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5386 pid);
0d62e5e8 5387 perror (s);
58caa3dc
DJ
5388 }
5389 }
098dbe61
AA
5390 else
5391 {
5392 if (regset->type == GENERAL_REGS)
5393 saw_general_regs = 1;
5394 regset->store_function (regcache, buf);
5395 }
fdeb2a12 5396 free (buf);
58caa3dc 5397 }
e9d25b98
DJ
5398 if (saw_general_regs)
5399 return 0;
5400 else
5401 return 1;
58caa3dc
DJ
5402}
5403
/* Write all regsets described by REGSETS_INFO for the current thread
   back to the kernel via ptrace, taking the register values from
   REGCACHE.  Each regset is first read from the kernel, overlaid with
   the cached values, then written back, so kernel-side fields not in
   the regcache are preserved.

   Returns 0 if a general-registers regset was successfully written
   (or the process disappeared with ESRCH), 1 otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Regsets with no fill function are read-only; skip them.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
5492
1faeff08 5493#else /* !HAVE_LINUX_REGSETS */
58caa3dc 5494
1faeff08 5495#define use_linux_regsets 0
3aee8918
PA
5496#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5497#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 5498
58caa3dc 5499#endif
1faeff08
MR
5500
5501/* Return 1 if register REGNO is supported by one of the regset ptrace
5502 calls or 0 if it has to be transferred individually. */
5503
5504static int
3aee8918 5505linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
5506{
5507 unsigned char mask = 1 << (regno % 8);
5508 size_t index = regno / 8;
5509
5510 return (use_linux_regsets
3aee8918
PA
5511 && (regs_info->regset_bitmap == NULL
5512 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
5513}
5514
58caa3dc 5515#ifdef HAVE_LINUX_USRREGS
1faeff08 5516
5b3da067 5517static int
3aee8918 5518register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
5519{
5520 int addr;
5521
3aee8918 5522 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
5523 error ("Invalid register number %d.", regnum);
5524
3aee8918 5525 addr = usrregs->regmap[regnum];
1faeff08
MR
5526
5527 return addr;
5528}
5529
/* Fetch one register (REGNO) from the current thread's USER area with
   PTRACE_PEEKUSER, one word at a time, and supply it to REGCACHE.
   Registers the low target cannot fetch, or with no USER-area
   address, are silently skipped; a ptrace error marks the register
   unavailable in the regcache.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
5577
/* Store one register, REGNO, from REGCACHE into the inferior's user
   area, one PTRACE_XFER_TYPE word at a time.  Silently does nothing
   for registers the target reports it cannot store or that have no
   user-area address.  ESRCH from ptrace (process already gone) is
   ignored; other errors are reported unless the register is expected
   to be unwritable.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words, and
     zero the buffer so padding bytes are well defined.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  /* Let the low target pre-process the bytes if it needs to.  */
  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      /* PTRACE_POKEUSER reports errors through errno.  */
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	    /* Coerce to a uintptr_t first to avoid potential gcc warning
	       about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
5632
5633/* Fetch all registers, or just one, from the child process.
5634 If REGNO is -1, do this for all registers, skipping any that are
5635 assumed to have been retrieved by regsets_fetch_inferior_registers,
5636 unless ALL is non-zero.
5637 Otherwise, REGNO specifies which register (so we can save time). */
5638static void
3aee8918
PA
5639usr_fetch_inferior_registers (const struct regs_info *regs_info,
5640 struct regcache *regcache, int regno, int all)
1faeff08 5641{
3aee8918
PA
5642 struct usrregs_info *usr = regs_info->usrregs;
5643
1faeff08
MR
5644 if (regno == -1)
5645 {
3aee8918
PA
5646 for (regno = 0; regno < usr->num_regs; regno++)
5647 if (all || !linux_register_in_regsets (regs_info, regno))
5648 fetch_register (usr, regcache, regno);
1faeff08
MR
5649 }
5650 else
3aee8918 5651 fetch_register (usr, regcache, regno);
1faeff08
MR
5652}
5653
5654/* Store our register values back into the inferior.
5655 If REGNO is -1, do this for all registers, skipping any that are
5656 assumed to have been saved by regsets_store_inferior_registers,
5657 unless ALL is non-zero.
5658 Otherwise, REGNO specifies which register (so we can save time). */
5659static void
3aee8918
PA
5660usr_store_inferior_registers (const struct regs_info *regs_info,
5661 struct regcache *regcache, int regno, int all)
1faeff08 5662{
3aee8918
PA
5663 struct usrregs_info *usr = regs_info->usrregs;
5664
1faeff08
MR
5665 if (regno == -1)
5666 {
3aee8918
PA
5667 for (regno = 0; regno < usr->num_regs; regno++)
5668 if (all || !linux_register_in_regsets (regs_info, regno))
5669 store_register (usr, regcache, regno);
1faeff08
MR
5670 }
5671 else
3aee8918 5672 store_register (usr, regcache, regno);
1faeff08
MR
5673}
5674
#else /* !HAVE_LINUX_USRREGS */

/* Stubs used when PTRACE_PEEKUSER-style register access is not
   available; all registers must be handled by the regset path, so
   these expand to no-ops.  */
#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)

#endif
1faeff08
MR
5681
5682
/* Fetch register REGNO (or all registers if REGNO is -1) from the
   inferior into REGCACHE, trying in order: the low target's own
   fetch_register hook, the regset path, and finally the user-area
   path for anything the regsets did not cover.  */
static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Give the low target first shot at every user-area register.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL becomes non-zero if the regset path could not handle
	 everything, in which case the usrregs pass fetches all.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A non-zero return from the hook means it fully handled the
	 register and nothing more is needed.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5715
/* Store register REGNO (or all registers if REGNO is -1) from
   REGCACHE back into the inferior, using the regset path first and
   the user-area path for anything the regsets did not cover.  */
static void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* ALL becomes non-zero if the regset path could not handle
	 everything, in which case the usrregs pass stores all.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5740
da6d8c04 5741
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success or an
   errno value on failure.  Tries /proc/PID/mem first (one syscall for
   the whole transfer) and falls back to word-at-a-time
   PTRACE_PEEKTEXT.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  PEEKTEXT signals errors via errno, so
     clear it once up front and stop on the first failure.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer: I words were read
     successfully; trim the leading alignment slop and clamp to LEN.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5829
93ae6fdc
PA
5830/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5831 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5832 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5833
ce3a066d 5834static int
f450004a 5835linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04 5836{
ae3e2ccf 5837 int i;
da6d8c04 5838 /* Round starting address down to longword boundary. */
ae3e2ccf 5839 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
da6d8c04 5840 /* Round ending address up; get number of longwords that makes. */
ae3e2ccf 5841 int count
493e2a69
MS
5842 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5843 / sizeof (PTRACE_XFER_TYPE);
5844
da6d8c04 5845 /* Allocate buffer of that many longwords. */
ae3e2ccf 5846 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
493e2a69 5847
0bfdf32f 5848 int pid = lwpid_of (current_thread);
da6d8c04 5849
f0ae6fc3
PA
5850 if (len == 0)
5851 {
5852 /* Zero length write always succeeds. */
5853 return 0;
5854 }
5855
0d62e5e8
DJ
5856 if (debug_threads)
5857 {
58d6951d 5858 /* Dump up to four bytes. */
bf47e248
PA
5859 char str[4 * 2 + 1];
5860 char *p = str;
5861 int dump = len < 4 ? len : 4;
5862
5863 for (i = 0; i < dump; i++)
5864 {
5865 sprintf (p, "%02x", myaddr[i]);
5866 p += 2;
5867 }
5868 *p = '\0';
5869
5870 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5871 str, (long) memaddr, pid);
0d62e5e8
DJ
5872 }
5873
da6d8c04
DJ
5874 /* Fill start and end extra bytes of buffer with existing memory data. */
5875
93ae6fdc 5876 errno = 0;
14ce3065
DE
5877 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5878 about coercing an 8 byte integer to a 4 byte pointer. */
5879 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5880 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5881 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5882 if (errno)
5883 return errno;
da6d8c04
DJ
5884
5885 if (count > 1)
5886 {
93ae6fdc 5887 errno = 0;
da6d8c04 5888 buffer[count - 1]
95954743 5889 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5890 /* Coerce to a uintptr_t first to avoid potential gcc warning
5891 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5892 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5893 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5894 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5895 if (errno)
5896 return errno;
da6d8c04
DJ
5897 }
5898
93ae6fdc 5899 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5900
493e2a69
MS
5901 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5902 myaddr, len);
da6d8c04
DJ
5903
5904 /* Write the entire buffer. */
5905
5906 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5907 {
5908 errno = 0;
14ce3065
DE
5909 ptrace (PTRACE_POKETEXT, pid,
5910 /* Coerce to a uintptr_t first to avoid potential gcc warning
5911 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5912 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5913 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5914 if (errno)
5915 return errno;
5916 }
5917
5918 return 0;
5919}
2f2893d9
DJ
5920
/* Initialize thread_db support for the current process, if it was
   built in and not already initialized.  Called when symbols become
   available so libthread_db can resolve them.  */
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process; nothing to do.  */
  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}
5933
e5379b03 5934static void
ef57601b 5935linux_request_interrupt (void)
e5379b03 5936{
78708b7c
PA
5937 /* Send a SIGINT to the process group. This acts just like the user
5938 typed a ^C on the controlling terminal. */
5939 kill (-signal_pid, SIGINT);
e5379b03
DJ
5940}
5941
aa691b87
RM
5942/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5943 to debugger memory starting at MYADDR. */
5944
5945static int
f450004a 5946linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
5947{
5948 char filename[PATH_MAX];
5949 int fd, n;
0bfdf32f 5950 int pid = lwpid_of (current_thread);
aa691b87 5951
6cebaf6e 5952 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
5953
5954 fd = open (filename, O_RDONLY);
5955 if (fd < 0)
5956 return -1;
5957
5958 if (offset != (CORE_ADDR) 0
5959 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5960 n = -1;
5961 else
5962 n = read (fd, myaddr, len);
5963
5964 close (fd);
5965
5966 return n;
5967}
5968
d993e290
PA
5969/* These breakpoint and watchpoint related wrapper functions simply
5970 pass on the function call if the target has registered a
5971 corresponding function. */
e013ee27
OF
5972
5973static int
802e8e6d
PA
5974linux_supports_z_point_type (char z_type)
5975{
5976 return (the_low_target.supports_z_point_type != NULL
5977 && the_low_target.supports_z_point_type (z_type));
5978}
5979
5980static int
5981linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5982 int size, struct raw_breakpoint *bp)
e013ee27 5983{
c8f4bfdd
YQ
5984 if (type == raw_bkpt_type_sw)
5985 return insert_memory_breakpoint (bp);
5986 else if (the_low_target.insert_point != NULL)
802e8e6d 5987 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5988 else
5989 /* Unsupported (see target.h). */
5990 return 1;
5991}
5992
5993static int
802e8e6d
PA
5994linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5995 int size, struct raw_breakpoint *bp)
e013ee27 5996{
c8f4bfdd
YQ
5997 if (type == raw_bkpt_type_sw)
5998 return remove_memory_breakpoint (bp);
5999 else if (the_low_target.remove_point != NULL)
802e8e6d 6000 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
6001 else
6002 /* Unsupported (see target.h). */
6003 return 1;
6004}
6005
3e572f71
PA
6006/* Implement the to_stopped_by_sw_breakpoint target_ops
6007 method. */
6008
6009static int
6010linux_stopped_by_sw_breakpoint (void)
6011{
6012 struct lwp_info *lwp = get_thread_lwp (current_thread);
6013
6014 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6015}
6016
6017/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6018 method. */
6019
6020static int
6021linux_supports_stopped_by_sw_breakpoint (void)
6022{
6023 return USE_SIGTRAP_SIGINFO;
6024}
6025
6026/* Implement the to_stopped_by_hw_breakpoint target_ops
6027 method. */
6028
6029static int
6030linux_stopped_by_hw_breakpoint (void)
6031{
6032 struct lwp_info *lwp = get_thread_lwp (current_thread);
6033
6034 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6035}
6036
6037/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6038 method. */
6039
6040static int
6041linux_supports_stopped_by_hw_breakpoint (void)
6042{
6043 return USE_SIGTRAP_SIGINFO;
6044}
6045
70b90b91 6046/* Implement the supports_hardware_single_step target_ops method. */
45614f15
YQ
6047
6048static int
70b90b91 6049linux_supports_hardware_single_step (void)
45614f15 6050{
45614f15
YQ
6051 return can_hardware_single_step ();
6052}
6053
7d00775e
AT
6054static int
6055linux_supports_software_single_step (void)
6056{
6057 return can_software_single_step ();
6058}
6059
e013ee27
OF
6060static int
6061linux_stopped_by_watchpoint (void)
6062{
0bfdf32f 6063 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 6064
15c66dd6 6065 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
6066}
6067
6068static CORE_ADDR
6069linux_stopped_data_address (void)
6070{
0bfdf32f 6071 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
6072
6073 return lwp->stopped_data_address;
e013ee27
OF
6074}
6075
db0dfaa0
LM
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the actual runtime text/data addresses via
   PTRACE_PEEKUSER and returns 1 with *TEXT_P/*DATA_P filled in, or 0 if
   any of the ptrace calls failed.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  /* PTRACE_PEEKUSER reports failure through errno; clear it once and
     check after all three reads.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
#endif
6122
07e059b5
VP
6123static int
6124linux_qxfer_osdata (const char *annex,
1b3f6016
PA
6125 unsigned char *readbuf, unsigned const char *writebuf,
6126 CORE_ADDR offset, int len)
07e059b5 6127{
d26e3629 6128 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
6129}
6130
d0722149
DE
6131/* Convert a native/host siginfo object, into/from the siginfo in the
6132 layout of the inferiors' architecture. */
6133
6134static void
8adce034 6135siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
d0722149
DE
6136{
6137 int done = 0;
6138
6139 if (the_low_target.siginfo_fixup != NULL)
6140 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6141
6142 /* If there was no callback, or the callback didn't do anything,
6143 then just do a straight memcpy. */
6144 if (!done)
6145 {
6146 if (direction == 1)
a5362b9a 6147 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 6148 else
a5362b9a 6149 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
6150 }
6151}
6152
4aa995e1
PA
/* Read or write the current thread's pending siginfo.  Exactly one
   of READBUF/WRITEBUF is non-NULL; OFFSET/LEN select a window into
   the (inferior-layout) siginfo object.  Returns the number of bytes
   transferred, or -1 on error.  */
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Writes are read-modify-write on the whole object, so fetch the
     current siginfo in either direction.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
6201
bd99dc85
PA
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when children change state; as the handler for the
   sigsuspend in my_waitpid.  Only async-signal-safe calls (write,
   async_file_mark) are made here, and errno is preserved across the
   handler.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
6228
/* Non-stop mode is always available on GNU/Linux.  */
static int
linux_supports_non_stop (void)
{
  return 1;
}
6234
/* Enable or disable async (non-stop) mode.  When enabling, create
   the self-pipe used to wake the event loop on SIGCHLD and register
   it with the event loop; when disabling, tear it down.  Returns the
   previous async state.  SIGCHLD is blocked around the transition so
   the handler never sees a half-initialized pipe.  */
static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Failed to create the pipe: restore a consistent
		 "disabled" state and report the old state.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  /* Both ends are non-blocking: the handler must never block
	     writing, and draining must never block reading.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
6289
6290static int
6291linux_start_non_stop (int nonstop)
6292{
6293 /* Register or unregister from event-loop accordingly. */
6294 linux_async (nonstop);
aa96c426
GB
6295
6296 if (target_is_async_p () != (nonstop != 0))
6297 return -1;
6298
bd99dc85
PA
6299 return 0;
6300}
6301
cf8fd78b
PA
/* Debugging multiple processes at once is always supported here.  */
static int
linux_supports_multi_process (void)
{
  return 1;
}
6307
89245bc0
DB
6308/* Check if fork events are supported. */
6309
6310static int
6311linux_supports_fork_events (void)
6312{
6313 return linux_supports_tracefork ();
6314}
6315
6316/* Check if vfork events are supported. */
6317
6318static int
6319linux_supports_vfork_events (void)
6320{
6321 return linux_supports_tracefork ();
6322}
6323
94585166
DB
6324/* Check if exec events are supported. */
6325
6326static int
6327linux_supports_exec_events (void)
6328{
6329 return linux_supports_traceexec ();
6330}
6331
de0d863e
DB
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

static void
linux_handle_new_gdb_connection (void)
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  The
	     flag makes the stop handler apply the new options when
	     the stop is processed.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}
6361
03583c20
UW
/* Report whether address-space randomization can be disabled for the
   inferior; this requires personality(2) support detected at
   configure time.  */
static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 6371
d1feda86
YQ
/* The in-process agent is always supported on this target.  */
static int
linux_supports_agent (void)
{
  return 1;
}
6377
c2d6af84
PA
6378static int
6379linux_supports_range_stepping (void)
6380{
c3805894
YQ
6381 if (can_software_single_step ())
6382 return 1;
c2d6af84
PA
6383 if (*the_low_target.supports_range_stepping == NULL)
6384 return 0;
6385
6386 return (*the_low_target.supports_range_stepping) ();
6387}
6388
efcbbd14
UW
/* Enumerate spufs IDs for process PID.  Scans /proc/PID/fd for file
   descriptors that are directories on a spufs filesystem and writes
   each such fd number (4 bytes) into BUF, honoring the OFFSET/LEN
   window.  Returns the number of bytes written, or -1 if the fd
   directory cannot be opened.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;       /* Byte position in the virtual id stream.  */
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* atoi returns 0 for "." and ".." too; fd 0 is skipped along
	 with them.  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Only ids that fall entirely inside [OFFSET, OFFSET+LEN) are
	 copied out.  NOTE(review): the 4-byte store assumes BUF is
	 suitably aligned when OFFSET is a multiple of 4 — confirm
	 callers always pass aligned windows.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
6437
6438/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6439 object type, using the /proc file system. */
6440static int
6441linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6442 unsigned const char *writebuf,
6443 CORE_ADDR offset, int len)
6444{
0bfdf32f 6445 long pid = lwpid_of (current_thread);
efcbbd14
UW
6446 char buf[128];
6447 int fd = 0;
6448 int ret = 0;
6449
6450 if (!writebuf && !readbuf)
6451 return -1;
6452
6453 if (!*annex)
6454 {
6455 if (!readbuf)
6456 return -1;
6457 else
6458 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6459 }
6460
6461 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6462 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6463 if (fd <= 0)
6464 return -1;
6465
6466 if (offset != 0
6467 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6468 {
6469 close (fd);
6470 return 0;
6471 }
6472
6473 if (writebuf)
6474 ret = write (fd, writebuf, (size_t) len);
6475 else
6476 ret = read (fd, readbuf, (size_t) len);
6477
6478 close (fd);
6479 return ret;
6480}
6481
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of a no-MMU load map, as returned by the kernel.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

/* Read up to LEN bytes of the inferior's load map (for ANNEX "exec"
   or "interp") starting at OFFSET into MYADDR.  The kernel fills in
   DATA via ptrace; returns the number of bytes copied, or -1 on any
   error.  */
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): CORE_ADDR is unsigned, so the "offset < 0" arm can
     never be true; the upper-bound check is what matters here.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 6559
1570b33e 6560static void
06e03fff 6561linux_process_qsupported (char **features, int count)
1570b33e
L
6562{
6563 if (the_low_target.process_qsupported != NULL)
06e03fff 6564 the_low_target.process_qsupported (features, count);
1570b33e
L
6565}
6566
82075af2
JS
6567static int
6568linux_supports_catch_syscall (void)
6569{
6570 return (the_low_target.get_syscall_trapinfo != NULL
6571 && linux_supports_tracesysgood ());
6572}
6573
ae91f625
MK
6574static int
6575linux_get_ipa_tdesc_idx (void)
6576{
6577 if (the_low_target.get_ipa_tdesc_idx == NULL)
6578 return 0;
6579
6580 return (*the_low_target.get_ipa_tdesc_idx) ();
6581}
6582
219f2f23
PA
6583static int
6584linux_supports_tracepoints (void)
6585{
6586 if (*the_low_target.supports_tracepoints == NULL)
6587 return 0;
6588
6589 return (*the_low_target.supports_tracepoints) ();
6590}
6591
6592static CORE_ADDR
6593linux_read_pc (struct regcache *regcache)
6594{
6595 if (the_low_target.get_pc == NULL)
6596 return 0;
6597
6598 return (*the_low_target.get_pc) (regcache);
6599}
6600
6601static void
6602linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6603{
6604 gdb_assert (the_low_target.set_pc != NULL);
6605
6606 (*the_low_target.set_pc) (regcache, pc);
6607}
6608
8336d594
PA
6609static int
6610linux_thread_stopped (struct thread_info *thread)
6611{
6612 return get_thread_lwp (thread)->stopped;
6613}
6614
6615/* This exposes stop-all-threads functionality to other modules. */
6616
6617static void
7984d532 6618linux_pause_all (int freeze)
8336d594 6619{
7984d532
PA
6620 stop_all_lwps (freeze, NULL);
6621}
6622
6623/* This exposes unstop-all-threads functionality to other gdbserver
6624 modules. */
6625
6626static void
6627linux_unpause_all (int unfreeze)
6628{
6629 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
6630}
6631
90d74c30
PA
6632static int
6633linux_prepare_to_access_memory (void)
6634{
6635 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6636 running LWP. */
6637 if (non_stop)
6638 linux_pause_all (1);
6639 return 0;
6640}
6641
6642static void
0146f85b 6643linux_done_accessing_memory (void)
90d74c30
PA
6644{
6645 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6646 running LWP. */
6647 if (non_stop)
6648 linux_unpause_all (1);
6649}
6650
fa593d66
PA
6651static int
6652linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6653 CORE_ADDR collector,
6654 CORE_ADDR lockaddr,
6655 ULONGEST orig_size,
6656 CORE_ADDR *jump_entry,
405f8e94
SS
6657 CORE_ADDR *trampoline,
6658 ULONGEST *trampoline_size,
fa593d66
PA
6659 unsigned char *jjump_pad_insn,
6660 ULONGEST *jjump_pad_insn_size,
6661 CORE_ADDR *adjusted_insn_addr,
405f8e94
SS
6662 CORE_ADDR *adjusted_insn_addr_end,
6663 char *err)
fa593d66
PA
6664{
6665 return (*the_low_target.install_fast_tracepoint_jump_pad)
6666 (tpoint, tpaddr, collector, lockaddr, orig_size,
405f8e94
SS
6667 jump_entry, trampoline, trampoline_size,
6668 jjump_pad_insn, jjump_pad_insn_size,
6669 adjusted_insn_addr, adjusted_insn_addr_end,
6670 err);
fa593d66
PA
6671}
6672
6a271cae
PA
6673static struct emit_ops *
6674linux_emit_ops (void)
6675{
6676 if (the_low_target.emit_ops != NULL)
6677 return (*the_low_target.emit_ops) ();
6678 else
6679 return NULL;
6680}
6681
405f8e94
SS
6682static int
6683linux_get_min_fast_tracepoint_insn_len (void)
6684{
6685 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6686}
6687
2268b414
JK
6688/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6689
6690static int
6691get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6692 CORE_ADDR *phdr_memaddr, int *num_phdr)
6693{
6694 char filename[PATH_MAX];
6695 int fd;
6696 const int auxv_size = is_elf64
6697 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6698 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6699
6700 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6701
6702 fd = open (filename, O_RDONLY);
6703 if (fd < 0)
6704 return 1;
6705
6706 *phdr_memaddr = 0;
6707 *num_phdr = 0;
6708 while (read (fd, buf, auxv_size) == auxv_size
6709 && (*phdr_memaddr == 0 || *num_phdr == 0))
6710 {
6711 if (is_elf64)
6712 {
6713 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6714
6715 switch (aux->a_type)
6716 {
6717 case AT_PHDR:
6718 *phdr_memaddr = aux->a_un.a_val;
6719 break;
6720 case AT_PHNUM:
6721 *num_phdr = aux->a_un.a_val;
6722 break;
6723 }
6724 }
6725 else
6726 {
6727 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6728
6729 switch (aux->a_type)
6730 {
6731 case AT_PHDR:
6732 *phdr_memaddr = aux->a_un.a_val;
6733 break;
6734 case AT_PHNUM:
6735 *num_phdr = aux->a_un.a_val;
6736 break;
6737 }
6738 }
6739 }
6740
6741 close (fd);
6742
6743 if (*phdr_memaddr == 0 || *num_phdr == 0)
6744 {
6745 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6746 "phdr_memaddr = %ld, phdr_num = %d",
6747 (long) *phdr_memaddr, *num_phdr);
6748 return 2;
6749 }
6750
6751 return 0;
6752}
6753
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  /* Locate the program header table via the auxiliary vector.  */
  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);	/* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  Derived from the difference between the
     runtime PT_PHDR address and its link-time p_vaddr.  -1 acts as a
     "not found yet" sentinel.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: find PT_DYNAMIC and relocate its p_vaddr.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  /* No PT_DYNAMIC segment found.  */
  return 0;
}
6827
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL or a read
     failure.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Used to read the inferior word DT_MIPS_RLD_MAP* points at.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry itself.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  /* Used to read the inferior word DT_MIPS_RLD_MAP* points at.  */
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  /* DT_MIPS_RLD_MAP_REL is relative to the dynamic entry itself.  */
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6929
6930/* Read one pointer from MEMADDR in the inferior. */
6931
6932static int
6933read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6934{
485f1ee4
PA
6935 int ret;
6936
6937 /* Go through a union so this works on either big or little endian
6938 hosts, when the inferior's pointer size is smaller than the size
6939 of CORE_ADDR. It is assumed the inferior's endianness is the
6940 same of the superior's. */
6941 union
6942 {
6943 CORE_ADDR core_addr;
6944 unsigned int ui;
6945 unsigned char uc;
6946 } addr;
6947
6948 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6949 if (ret == 0)
6950 {
6951 if (ptr_size == sizeof (CORE_ADDR))
6952 *ptr = addr.core_addr;
6953 else if (ptr_size == sizeof (unsigned int))
6954 *ptr = addr.ui;
6955 else
6956 gdb_assert_not_reached ("unhandled pointer size");
6957 }
6958 return ret;
2268b414
JK
6959}
6960
/* Byte offsets of the fields gdbserver needs within the inferior's
   `struct r_debug' and `struct link_map'.  Instantiated below for the
   32-bit and 64-bit SVR4 layouts.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version. */
    int r_version_offset;

    /* Offset and size of r_debug.r_map. */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map. */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map. */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map. */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map. */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map. */
    int l_prev_offset;
  };
6984
fb723180 6985/* Construct qXfer:libraries-svr4:read reply. */
2268b414
JK
6986
6987static int
6988linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6989 unsigned const char *writebuf,
6990 CORE_ADDR offset, int len)
6991{
fe978cb0 6992 struct process_info_private *const priv = current_process ()->priv;
2268b414
JK
6993 char filename[PATH_MAX];
6994 int pid, is_elf64;
6995
6996 static const struct link_map_offsets lmo_32bit_offsets =
6997 {
6998 0, /* r_version offset. */
6999 4, /* r_debug.r_map offset. */
7000 0, /* l_addr offset in link_map. */
7001 4, /* l_name offset in link_map. */
7002 8, /* l_ld offset in link_map. */
7003 12, /* l_next offset in link_map. */
7004 16 /* l_prev offset in link_map. */
7005 };
7006
7007 static const struct link_map_offsets lmo_64bit_offsets =
7008 {
7009 0, /* r_version offset. */
7010 8, /* r_debug.r_map offset. */
7011 0, /* l_addr offset in link_map. */
7012 8, /* l_name offset in link_map. */
7013 16, /* l_ld offset in link_map. */
7014 24, /* l_next offset in link_map. */
7015 32 /* l_prev offset in link_map. */
7016 };
7017 const struct link_map_offsets *lmo;
214d508e 7018 unsigned int machine;
b1fbec62
GB
7019 int ptr_size;
7020 CORE_ADDR lm_addr = 0, lm_prev = 0;
b1fbec62
GB
7021 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7022 int header_done = 0;
2268b414
JK
7023
7024 if (writebuf != NULL)
7025 return -2;
7026 if (readbuf == NULL)
7027 return -1;
7028
0bfdf32f 7029 pid = lwpid_of (current_thread);
2268b414 7030 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
214d508e 7031 is_elf64 = elf_64_file_p (filename, &machine);
2268b414 7032 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
b1fbec62 7033 ptr_size = is_elf64 ? 8 : 4;
2268b414 7034
b1fbec62
GB
7035 while (annex[0] != '\0')
7036 {
7037 const char *sep;
7038 CORE_ADDR *addrp;
da4ae14a 7039 int name_len;
2268b414 7040
b1fbec62
GB
7041 sep = strchr (annex, '=');
7042 if (sep == NULL)
7043 break;
0c5bf5a9 7044
da4ae14a
TT
7045 name_len = sep - annex;
7046 if (name_len == 5 && startswith (annex, "start"))
b1fbec62 7047 addrp = &lm_addr;
da4ae14a 7048 else if (name_len == 4 && startswith (annex, "prev"))
b1fbec62
GB
7049 addrp = &lm_prev;
7050 else
7051 {
7052 annex = strchr (sep, ';');
7053 if (annex == NULL)
7054 break;
7055 annex++;
7056 continue;
7057 }
7058
7059 annex = decode_address_to_semicolon (addrp, sep + 1);
2268b414 7060 }
b1fbec62
GB
7061
7062 if (lm_addr == 0)
2268b414 7063 {
b1fbec62
GB
7064 int r_version = 0;
7065
7066 if (priv->r_debug == 0)
7067 priv->r_debug = get_r_debug (pid, is_elf64);
7068
7069 /* We failed to find DT_DEBUG. Such situation will not change
7070 for this inferior - do not retry it. Report it to GDB as
7071 E01, see for the reasons at the GDB solib-svr4.c side. */
7072 if (priv->r_debug == (CORE_ADDR) -1)
7073 return -1;
7074
7075 if (priv->r_debug != 0)
2268b414 7076 {
b1fbec62
GB
7077 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7078 (unsigned char *) &r_version,
7079 sizeof (r_version)) != 0
7080 || r_version != 1)
7081 {
7082 warning ("unexpected r_debug version %d", r_version);
7083 }
7084 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7085 &lm_addr, ptr_size) != 0)
7086 {
7087 warning ("unable to read r_map from 0x%lx",
7088 (long) priv->r_debug + lmo->r_map_offset);
7089 }
2268b414 7090 }
b1fbec62 7091 }
2268b414 7092
f6e8a41e 7093 std::string document = "<library-list-svr4 version=\"1.0\"";
b1fbec62
GB
7094
7095 while (lm_addr
7096 && read_one_ptr (lm_addr + lmo->l_name_offset,
7097 &l_name, ptr_size) == 0
7098 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7099 &l_addr, ptr_size) == 0
7100 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7101 &l_ld, ptr_size) == 0
7102 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7103 &l_prev, ptr_size) == 0
7104 && read_one_ptr (lm_addr + lmo->l_next_offset,
7105 &l_next, ptr_size) == 0)
7106 {
7107 unsigned char libname[PATH_MAX];
7108
7109 if (lm_prev != l_prev)
2268b414 7110 {
b1fbec62
GB
7111 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7112 (long) lm_prev, (long) l_prev);
7113 break;
2268b414
JK
7114 }
7115
d878444c
JK
7116 /* Ignore the first entry even if it has valid name as the first entry
7117 corresponds to the main executable. The first entry should not be
7118 skipped if the dynamic loader was loaded late by a static executable
7119 (see solib-svr4.c parameter ignore_first). But in such case the main
7120 executable does not have PT_DYNAMIC present and this function already
7121 exited above due to failed get_r_debug. */
7122 if (lm_prev == 0)
f6e8a41e 7123 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
d878444c
JK
7124 else
7125 {
7126 /* Not checking for error because reading may stop before
7127 we've got PATH_MAX worth of characters. */
7128 libname[0] = '\0';
7129 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7130 libname[sizeof (libname) - 1] = '\0';
7131 if (libname[0] != '\0')
2268b414 7132 {
d878444c
JK
7133 if (!header_done)
7134 {
7135 /* Terminate `<library-list-svr4'. */
f6e8a41e 7136 document += '>';
d878444c
JK
7137 header_done = 1;
7138 }
2268b414 7139
e6a58aa8
SM
7140 string_appendf (document, "<library name=\"");
7141 xml_escape_text_append (&document, (char *) libname);
7142 string_appendf (document, "\" lm=\"0x%lx\" "
f6e8a41e 7143 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
e6a58aa8
SM
7144 (unsigned long) lm_addr, (unsigned long) l_addr,
7145 (unsigned long) l_ld);
d878444c 7146 }
0afae3cf 7147 }
b1fbec62
GB
7148
7149 lm_prev = lm_addr;
7150 lm_addr = l_next;
2268b414
JK
7151 }
7152
b1fbec62
GB
7153 if (!header_done)
7154 {
7155 /* Empty list; terminate `<library-list-svr4'. */
f6e8a41e 7156 document += "/>";
b1fbec62
GB
7157 }
7158 else
f6e8a41e 7159 document += "</library-list-svr4>";
b1fbec62 7160
f6e8a41e 7161 int document_len = document.length ();
2268b414
JK
7162 if (offset < document_len)
7163 document_len -= offset;
7164 else
7165 document_len = 0;
7166 if (len > document_len)
7167 len = document_len;
7168
f6e8a41e 7169 memcpy (readbuf, document.data () + offset, len);
2268b414
JK
7170
7171 return len;
7172}
7173
9accd112
MM
7174#ifdef HAVE_LINUX_BTRACE
7175
969c39fb 7176/* See to_disable_btrace target method. */
9accd112 7177
969c39fb
MM
7178static int
7179linux_low_disable_btrace (struct btrace_target_info *tinfo)
7180{
7181 enum btrace_error err;
7182
7183 err = linux_disable_btrace (tinfo);
7184 return (err == BTRACE_ERR_NONE ? 0 : -1);
7185}
7186
bc504a31 7187/* Encode an Intel Processor Trace configuration. */
b20a6524
MM
7188
7189static void
7190linux_low_encode_pt_config (struct buffer *buffer,
7191 const struct btrace_data_pt_config *config)
7192{
7193 buffer_grow_str (buffer, "<pt-config>\n");
7194
7195 switch (config->cpu.vendor)
7196 {
7197 case CV_INTEL:
7198 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7199 "model=\"%u\" stepping=\"%u\"/>\n",
7200 config->cpu.family, config->cpu.model,
7201 config->cpu.stepping);
7202 break;
7203
7204 default:
7205 break;
7206 }
7207
7208 buffer_grow_str (buffer, "</pt-config>\n");
7209}
7210
7211/* Encode a raw buffer. */
7212
7213static void
7214linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7215 unsigned int size)
7216{
7217 if (size == 0)
7218 return;
7219
7220 /* We use hex encoding - see common/rsp-low.h. */
7221 buffer_grow_str (buffer, "<raw>\n");
7222
7223 while (size-- > 0)
7224 {
7225 char elem[2];
7226
7227 elem[0] = tohex ((*data >> 4) & 0xf);
7228 elem[1] = tohex (*data++ & 0xf);
7229
7230 buffer_grow (buffer, elem, 2);
7231 }
7232
7233 buffer_grow_str (buffer, "</raw>\n");
7234}
7235
969c39fb
MM
7236/* See to_read_btrace target method. */
7237
static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       enum btrace_read_type type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      /* Report errors to GDB as "E." textual replies.  */
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  /* Render the trace as XML, per the btrace.dtd format.  */
  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      /* One <block> element per branch-trace-store block.  */
      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      /* Raw Intel PT packets are hex-encoded verbatim.  */
      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
f4abbc16
MM
7298
7299/* See to_btrace_conf target method. */
7300
7301static int
7302linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7303 struct buffer *buffer)
7304{
7305 const struct btrace_config *conf;
7306
7307 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7308 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7309
7310 conf = linux_btrace_conf (tinfo);
7311 if (conf != NULL)
7312 {
7313 switch (conf->format)
7314 {
7315 case BTRACE_FORMAT_NONE:
7316 break;
7317
7318 case BTRACE_FORMAT_BTS:
d33501a5
MM
7319 buffer_xml_printf (buffer, "<bts");
7320 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7321 buffer_xml_printf (buffer, " />\n");
f4abbc16 7322 break;
b20a6524
MM
7323
7324 case BTRACE_FORMAT_PT:
7325 buffer_xml_printf (buffer, "<pt");
7326 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7327 buffer_xml_printf (buffer, "/>\n");
7328 break;
f4abbc16
MM
7329 }
7330 }
7331
7332 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7333 return 0;
7334}
9accd112
MM
7335#endif /* HAVE_LINUX_BTRACE */
7336
7b669087
GB
7337/* See nat/linux-nat.h. */
7338
7339ptid_t
7340current_lwp_ptid (void)
7341{
7342 return ptid_of (current_thread);
7343}
7344
dd373349
AT
7345/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7346
7347static int
7348linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7349{
7350 if (the_low_target.breakpoint_kind_from_pc != NULL)
7351 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7352 else
1652a986 7353 return default_breakpoint_kind_from_pc (pcptr);
dd373349
AT
7354}
7355
7356/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7357
7358static const gdb_byte *
7359linux_sw_breakpoint_from_kind (int kind, int *size)
7360{
7361 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7362
7363 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7364}
7365
769ef81f
AT
7366/* Implementation of the target_ops method
7367 "breakpoint_kind_from_current_state". */
7368
7369static int
7370linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7371{
7372 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7373 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7374 else
7375 return linux_breakpoint_kind_from_pc (pcptr);
7376}
7377
276d4552
YQ
7378/* Default implementation of linux_target_ops method "set_pc" for
7379 32-bit pc register which is literally named "pc". */
7380
7381void
7382linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7383{
7384 uint32_t newpc = pc;
7385
7386 supply_register_by_name (regcache, "pc", &newpc);
7387}
7388
7389/* Default implementation of linux_target_ops method "get_pc" for
7390 32-bit pc register which is literally named "pc". */
7391
7392CORE_ADDR
7393linux_get_pc_32bit (struct regcache *regcache)
7394{
7395 uint32_t pc;
7396
7397 collect_register_by_name (regcache, "pc", &pc);
7398 if (debug_threads)
7399 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7400 return pc;
7401}
7402
6f69e520
YQ
7403/* Default implementation of linux_target_ops method "set_pc" for
7404 64-bit pc register which is literally named "pc". */
7405
7406void
7407linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7408{
7409 uint64_t newpc = pc;
7410
7411 supply_register_by_name (regcache, "pc", &newpc);
7412}
7413
7414/* Default implementation of linux_target_ops method "get_pc" for
7415 64-bit pc register which is literally named "pc". */
7416
7417CORE_ADDR
7418linux_get_pc_64bit (struct regcache *regcache)
7419{
7420 uint64_t pc;
7421
7422 collect_register_by_name (regcache, "pc", &pc);
7423 if (debug_threads)
7424 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7425 return pc;
7426}
7427
0570503d 7428/* See linux-low.h. */
974c89e0 7429
0570503d
PFC
7430int
7431linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
974c89e0
AH
7432{
7433 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7434 int offset = 0;
7435
7436 gdb_assert (wordsize == 4 || wordsize == 8);
7437
7438 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
7439 {
7440 if (wordsize == 4)
7441 {
0570503d 7442 uint32_t *data_p = (uint32_t *) data;
974c89e0 7443 if (data_p[0] == match)
0570503d
PFC
7444 {
7445 *valp = data_p[1];
7446 return 1;
7447 }
974c89e0
AH
7448 }
7449 else
7450 {
0570503d 7451 uint64_t *data_p = (uint64_t *) data;
974c89e0 7452 if (data_p[0] == match)
0570503d
PFC
7453 {
7454 *valp = data_p[1];
7455 return 1;
7456 }
974c89e0
AH
7457 }
7458
7459 offset += 2 * wordsize;
7460 }
7461
7462 return 0;
7463}
7464
7465/* See linux-low.h. */
7466
7467CORE_ADDR
7468linux_get_hwcap (int wordsize)
7469{
0570503d
PFC
7470 CORE_ADDR hwcap = 0;
7471 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7472 return hwcap;
974c89e0
AH
7473}
7474
7475/* See linux-low.h. */
7476
7477CORE_ADDR
7478linux_get_hwcap2 (int wordsize)
7479{
0570503d
PFC
7480 CORE_ADDR hwcap2 = 0;
7481 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7482 return hwcap2;
974c89e0 7483}
6f69e520 7484
ce3a066d
DJ
/* The Linux target_ops vector.  NOTE: this is a positional aggregate
   initializer - the order of entries must match the declaration of
   struct target_ops exactly; NULL marks an unimplemented method.  */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_post_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_hardware_single_step,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,		/* get_tib_address: Windows-only, unsupported here.  */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,		/* enable_btrace */
  NULL,		/* disable_btrace */
  NULL,		/* read_btrace */
  NULL,		/* read_btrace_conf */
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind,
  linux_proc_tid_get_name,
  linux_breakpoint_kind_from_current_state,
  linux_supports_software_single_step,
  linux_supports_catch_syscall,
  linux_get_ipa_tdesc_idx,
#if USE_THREAD_DB
  thread_db_thread_handle,
#else
  NULL,
#endif
};
7590
3aee8918
PA
7591#ifdef HAVE_LINUX_REGSETS
7592void
7593initialize_regsets_info (struct regsets_info *info)
7594{
7595 for (info->num_regsets = 0;
7596 info->regsets[info->num_regsets].size >= 0;
7597 info->num_regsets++)
7598 ;
3aee8918
PA
7599}
7600#endif
7601
da6d8c04
DJ
7602void
7603initialize_low (void)
7604{
bd99dc85 7605 struct sigaction sigchld_action;
dd373349 7606
bd99dc85 7607 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 7608 set_target_ops (&linux_target_ops);
dd373349 7609
aa7c7447 7610 linux_ptrace_init_warnings ();
1b919490 7611 linux_proc_init_warnings ();
bd99dc85
PA
7612
7613 sigchld_action.sa_handler = sigchld_handler;
7614 sigemptyset (&sigchld_action.sa_mask);
7615 sigchld_action.sa_flags = SA_RESTART;
7616 sigaction (SIGCHLD, &sigchld_action, NULL);
3aee8918
PA
7617
7618 initialize_low_arch ();
89245bc0
DB
7619
7620 linux_check_ptrace_features ();
da6d8c04 7621}
This page took 2.18783 seconds and 4 git commands to generate.