Remove usage of find_inferior in iterate_over_lwps
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1/* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2017 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19#include "server.h"
20#include "linux-low.h"
21#include "nat/linux-osdata.h"
22#include "agent.h"
23#include "tdesc.h"
24#include "rsp-low.h"
25#include "signals-state-save-restore.h"
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
28#include "gdb_wait.h"
29#include "nat/gdb_ptrace.h"
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
32#include "nat/linux-personality.h"
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
36#include <unistd.h>
37#include <sys/syscall.h>
38#include <sched.h>
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
43#include <sys/stat.h>
44#include <sys/vfs.h>
45#include <sys/uio.h>
46#include "filestuff.h"
47#include "tracepoint.h"
48#include "hostio.h"
49#include <inttypes.h>
50#include "common-inferior.h"
51#include "nat/fork-inferior.h"
52#include "environ.h"
53#ifndef ELFMAG0
54/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
55 then ELFMAG0 will have been defined. If it didn't get included by
56 gdb_proc_service.h then including it will likely introduce a duplicate
57 definition of elf_fpregset_t. */
58#include <elf.h>
59#endif
60#include "nat/linux-namespaces.h"
61
62#ifndef SPUFS_MAGIC
63#define SPUFS_MAGIC 0x23c9b64e
64#endif
65
66#ifdef HAVE_PERSONALITY
67# include <sys/personality.h>
68# if !HAVE_DECL_ADDR_NO_RANDOMIZE
69# define ADDR_NO_RANDOMIZE 0x0040000
70# endif
71#endif
72
73#ifndef O_LARGEFILE
74#define O_LARGEFILE 0
75#endif
76
77/* Some targets did not define these ptrace constants from the start,
78 so gdbserver defines them locally here. In the future, these may
79 be removed after they are added to asm/ptrace.h. */
80#if !(defined(PT_TEXT_ADDR) \
81 || defined(PT_DATA_ADDR) \
82 || defined(PT_TEXT_END_ADDR))
83#if defined(__mcoldfire__)
84/* These are still undefined in 3.10 kernels. */
85#define PT_TEXT_ADDR 49*4
86#define PT_DATA_ADDR 50*4
87#define PT_TEXT_END_ADDR 51*4
88/* BFIN already defines these since at least 2.6.32 kernels. */
89#elif defined(BFIN)
90#define PT_TEXT_ADDR 220
91#define PT_TEXT_END_ADDR 224
92#define PT_DATA_ADDR 228
93/* These are still undefined in 3.10 kernels. */
94#elif defined(__TMS320C6X__)
95#define PT_TEXT_ADDR (0x10000*4)
96#define PT_DATA_ADDR (0x10004*4)
97#define PT_TEXT_END_ADDR (0x10008*4)
98#endif
99#endif
100
101#ifdef HAVE_LINUX_BTRACE
102# include "nat/linux-btrace.h"
103# include "btrace-common.h"
104#endif
105
106#ifndef HAVE_ELF32_AUXV_T
107/* Copied from glibc's elf.h. */
108typedef struct
109{
110 uint32_t a_type; /* Entry type */
111 union
112 {
113 uint32_t a_val; /* Integer value */
114 /* We used to have pointer elements added here. We cannot do that,
115 though, since it does not work when using 32-bit definitions
116 on 64-bit platforms and vice versa. */
117 } a_un;
118} Elf32_auxv_t;
119#endif
120
121#ifndef HAVE_ELF64_AUXV_T
122/* Copied from glibc's elf.h. */
123typedef struct
124{
125 uint64_t a_type; /* Entry type */
126 union
127 {
128 uint64_t a_val; /* Integer value */
129 /* We used to have pointer elements added here. We cannot do that,
130 though, since it does not work when using 32-bit definitions
131 on 64-bit platforms and vice versa. */
132 } a_un;
133} Elf64_auxv_t;
134#endif
135
136/* Does the current host support PTRACE_GETREGSET? */
137int have_ptrace_getregset = -1;
138
139/* LWP accessors. */
140
141/* See nat/linux-nat.h. */
142
143ptid_t
144ptid_of_lwp (struct lwp_info *lwp)
145{
146 return ptid_of (get_lwp_thread (lwp));
147}
148
149/* See nat/linux-nat.h. */
150
151void
152lwp_set_arch_private_info (struct lwp_info *lwp,
153 struct arch_lwp_info *info)
154{
155 lwp->arch_private = info;
156}
157
158/* See nat/linux-nat.h. */
159
160struct arch_lwp_info *
161lwp_arch_private_info (struct lwp_info *lwp)
162{
163 return lwp->arch_private;
164}
165
166/* See nat/linux-nat.h. */
167
168int
169lwp_is_stopped (struct lwp_info *lwp)
170{
171 return lwp->stopped;
172}
173
174/* See nat/linux-nat.h. */
175
176enum target_stop_reason
177lwp_stop_reason (struct lwp_info *lwp)
178{
179 return lwp->stop_reason;
180}
181
182/* See nat/linux-nat.h. */
183
184int
185lwp_is_stepping (struct lwp_info *lwp)
186{
187 return lwp->stepping;
188}
189
190/* A list of all unknown processes which receive stop signals. Some
191 other process will presumably claim each of these as forked
192 children momentarily. */
193
194struct simple_pid_list
195{
196 /* The process ID. */
197 int pid;
198
199 /* The status as reported by waitpid. */
200 int status;
201
202 /* Next in chain. */
203 struct simple_pid_list *next;
204};
205struct simple_pid_list *stopped_pids;
206
207/* Trivial list manipulation functions to keep track of a list of new
208 stopped processes. */
209
210static void
211add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
212{
213 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
214
215 new_pid->pid = pid;
216 new_pid->status = status;
217 new_pid->next = *listp;
218 *listp = new_pid;
219}
220
221static int
222pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
223{
224 struct simple_pid_list **p;
225
226 for (p = listp; *p != NULL; p = &(*p)->next)
227 if ((*p)->pid == pid)
228 {
229 struct simple_pid_list *next = (*p)->next;
230
231 *statusp = (*p)->status;
232 xfree (*p);
233 *p = next;
234 return 1;
235 }
236 return 0;
237}
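/* Illustrative note: this list covers a race, roughly as follows.
   When a traced thread forks or clones, the new child's initial
   SIGSTOP can be collected by waitpid before the parent's
   PTRACE_EVENT arrives.  The wait loop stashes such early stops with
   something like:

     add_to_pid_list (&stopped_pids, new_pid, status);

   and handle_extended_wait later claims them:

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... wait for the new child's SIGSTOP explicitly ...  */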
238
239enum stopping_threads_kind
240 {
241 /* Not stopping threads presently. */
242 NOT_STOPPING_THREADS,
243
244 /* Stopping threads. */
245 STOPPING_THREADS,
246
247 /* Stopping and suspending threads. */
248 STOPPING_AND_SUSPENDING_THREADS
249 };
250
251/* This is set while stop_all_lwps is in effect. */
252enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
253
254/* FIXME make into a target method? */
255int using_threads = 1;
256
257/* True if we're presently stabilizing threads (moving them out of
258 jump pads). */
259static int stabilizing_threads;
260
261static void linux_resume_one_lwp (struct lwp_info *lwp,
262 int step, int signal, siginfo_t *info);
263static void linux_resume (struct thread_resume *resume_info, size_t n);
264static void stop_all_lwps (int suspend, struct lwp_info *except);
265static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
266static void unsuspend_all_lwps (struct lwp_info *except);
267static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
268 int *wstat, int options);
269static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
270static struct lwp_info *add_lwp (ptid_t ptid);
271static void linux_mourn (struct process_info *process);
272static int linux_stopped_by_watchpoint (void);
273static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
274static int lwp_is_marked_dead (struct lwp_info *lwp);
275static void proceed_all_lwps (void);
276static int finish_step_over (struct lwp_info *lwp);
277static int kill_lwp (unsigned long lwpid, int signo);
278static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
279static void complete_ongoing_step_over (void);
280static int linux_low_ptrace_options (int attached);
281static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
282static int proceed_one_lwp (thread_info *thread, void *except);
283
284/* When the event-loop is doing a step-over, this points at the thread
285 being stepped. */
286ptid_t step_over_bkpt;
287
288/* True if the low target can hardware single-step. */
289
290static int
291can_hardware_single_step (void)
292{
293 if (the_low_target.supports_hardware_single_step != NULL)
294 return the_low_target.supports_hardware_single_step ();
295 else
296 return 0;
297}
298
299/* True if the low target can software single-step. Such targets
300 implement the GET_NEXT_PCS callback. */
301
302static int
303can_software_single_step (void)
304{
305 return (the_low_target.get_next_pcs != NULL);
306}
307
308/* True if the low target supports memory breakpoints. If so, we'll
309 have a GET_PC implementation. */
310
311static int
312supports_breakpoints (void)
313{
314 return (the_low_target.get_pc != NULL);
315}
316
317/* Returns true if this target can support fast tracepoints. This
318 does not mean that the in-process agent has been loaded in the
319 inferior. */
320
321static int
322supports_fast_tracepoints (void)
323{
324 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
325}
326
327/* True if LWP is stopped in its stepping range. */
328
329static int
330lwp_in_step_range (struct lwp_info *lwp)
331{
332 CORE_ADDR pc = lwp->stop_pc;
333
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335}
336
337struct pending_signals
338{
339 int signal;
340 siginfo_t info;
341 struct pending_signals *prev;
342};
343
344/* The read/write ends of the pipe registered as waitable file in the
345 event loop. */
346static int linux_event_pipe[2] = { -1, -1 };
347
348/* True if we're currently in async mode. */
349#define target_is_async_p() (linux_event_pipe[0] != -1)
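/* Illustrative note: this is the classic self-pipe trick.  When a
   target event is pending, async_file_mark (declared below) writes a
   byte to linux_event_pipe[1]; the main event loop watches
   linux_event_pipe[0] and wakes up to process the event.  */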
350
351static void send_sigstop (struct lwp_info *lwp);
352static void wait_for_sigstop (void);
353
354/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF file, and -1 if it is not ELF; set *MACHINE to the ELF machine type (EM_NONE if not ELF). */
355
356static int
357elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
358{
359 if (header->e_ident[EI_MAG0] == ELFMAG0
360 && header->e_ident[EI_MAG1] == ELFMAG1
361 && header->e_ident[EI_MAG2] == ELFMAG2
362 && header->e_ident[EI_MAG3] == ELFMAG3)
363 {
364 *machine = header->e_machine;
365 return header->e_ident[EI_CLASS] == ELFCLASS64;
366
367 }
368 *machine = EM_NONE;
369 return -1;
370}
371
372/* Return non-zero if FILE is a 64-bit ELF file,
373 zero if the file is not a 64-bit ELF file,
374 and -1 if the file is not accessible or doesn't exist. */
375
376static int
377elf_64_file_p (const char *file, unsigned int *machine)
378{
379 Elf64_Ehdr header;
380 int fd;
381
382 fd = open (file, O_RDONLY);
383 if (fd < 0)
384 return -1;
385
386 if (read (fd, &header, sizeof (header)) != sizeof (header))
387 {
388 close (fd);
389 return 0;
390 }
391 close (fd);
392
393 return elf_64_header_p (&header, machine);
394}
395
396/* Accepts an integer PID; returns true if the executable that PID
397 is running is a 64-bit ELF file. */
398
399int
400linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
401{
402 char file[PATH_MAX];
403
404 sprintf (file, "/proc/%d/exe", pid);
405 return elf_64_file_p (file, machine);
406}
407
408static void
409delete_lwp (struct lwp_info *lwp)
410{
411 struct thread_info *thr = get_lwp_thread (lwp);
412
413 if (debug_threads)
414 debug_printf ("deleting %ld\n", lwpid_of (thr));
415
416 remove_thread (thr);
417
418 if (the_low_target.delete_thread != NULL)
419 the_low_target.delete_thread (lwp->arch_private);
420 else
421 gdb_assert (lwp->arch_private == NULL);
422
423 free (lwp);
424}
425
426/* Add a process to the common process list, and set its private
427 data. */
428
429static struct process_info *
430linux_add_process (int pid, int attached)
431{
432 struct process_info *proc;
433
434 proc = add_process (pid, attached);
435 proc->priv = XCNEW (struct process_info_private);
436
437 if (the_low_target.new_process != NULL)
438 proc->priv->arch_private = the_low_target.new_process ();
439
440 return proc;
441}
442
443static CORE_ADDR get_pc (struct lwp_info *lwp);
444
445/* Call the target arch_setup function on the current thread. */
446
447static void
448linux_arch_setup (void)
449{
450 the_low_target.arch_setup ();
451}
452
453/* Call the target arch_setup function on THREAD. */
454
455static void
456linux_arch_setup_thread (struct thread_info *thread)
457{
458 struct thread_info *saved_thread;
459
460 saved_thread = current_thread;
461 current_thread = thread;
462
463 linux_arch_setup ();
464
465 current_thread = saved_thread;
466}
467
468/* Handle a GNU/Linux extended wait response. If we see a clone,
469 fork, or vfork event, we need to add the new LWP to our list
470 (and return 0 so as not to report the trap to higher layers).
471 If we see an exec event, we will modify ORIG_EVENT_LWP to point
472 to a new LWP representing the new program. */
473
474static int
475handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
476{
477 struct lwp_info *event_lwp = *orig_event_lwp;
478 int event = linux_ptrace_get_extended_event (wstat);
479 struct thread_info *event_thr = get_lwp_thread (event_lwp);
480 struct lwp_info *new_lwp;
481
482 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
483
484 /* All extended events we currently use are mid-syscall. Only
485 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
486 you have to be using PTRACE_SEIZE to get that. */
487 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
488
489 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
490 || (event == PTRACE_EVENT_CLONE))
491 {
492 ptid_t ptid;
493 unsigned long new_pid;
494 int ret, status;
495
496 /* Get the pid of the new lwp. */
497 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
498 &new_pid);
499
500 /* If we haven't already seen the new PID stop, wait for it now. */
501 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
502 {
503 /* The new child has a pending SIGSTOP. We can't affect it until it
504 hits the SIGSTOP, but we're already attached. */
505
506 ret = my_waitpid (new_pid, &status, __WALL);
507
508 if (ret == -1)
509 perror_with_name ("waiting for new child");
510 else if (ret != new_pid)
511 warning ("wait returned unexpected PID %d", ret);
512 else if (!WIFSTOPPED (status))
513 warning ("wait returned unexpected status 0x%x", status);
514 }
515
516 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
517 {
518 struct process_info *parent_proc;
519 struct process_info *child_proc;
520 struct lwp_info *child_lwp;
521 struct thread_info *child_thr;
522 struct target_desc *tdesc;
523
524 ptid = ptid_build (new_pid, new_pid, 0);
525
526 if (debug_threads)
527 {
528 debug_printf ("HEW: Got fork event from LWP %ld, "
529 "new child is %d\n",
530 ptid_get_lwp (ptid_of (event_thr)),
531 ptid_get_pid (ptid));
532 }
533
534 /* Add the new process to the tables and clone the breakpoint
535 lists of the parent. We need to do this even if the new process
536 will be detached, since we will need the process object and the
537 breakpoints to remove any breakpoints from memory when we
538 detach, and the client side will access registers. */
539 child_proc = linux_add_process (new_pid, 0);
540 gdb_assert (child_proc != NULL);
541 child_lwp = add_lwp (ptid);
542 gdb_assert (child_lwp != NULL);
543 child_lwp->stopped = 1;
544 child_lwp->must_set_ptrace_flags = 1;
545 child_lwp->status_pending_p = 0;
546 child_thr = get_lwp_thread (child_lwp);
547 child_thr->last_resume_kind = resume_stop;
548 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
549
550 /* If we're suspending all threads, leave this one suspended
551 too. If the fork/clone parent is stepping over a breakpoint,
552 all other threads have been suspended already. Leave the
553 child suspended too. */
554 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
555 || event_lwp->bp_reinsert != 0)
556 {
557 if (debug_threads)
558 debug_printf ("HEW: leaving child suspended\n");
559 child_lwp->suspended = 1;
560 }
561
562 parent_proc = get_thread_process (event_thr);
563 child_proc->attached = parent_proc->attached;
564
565 if (event_lwp->bp_reinsert != 0
566 && can_software_single_step ()
567 && event == PTRACE_EVENT_VFORK)
568 {
569 /* If we leave single-step breakpoints there, the child will
570 hit them, so uninsert single-step breakpoints from the parent
571 (and child). Once the vfork child is done, reinsert
572 them in the parent. */
573 uninsert_single_step_breakpoints (event_thr);
574 }
575
576 clone_all_breakpoints (child_thr, event_thr);
577
578 tdesc = allocate_target_description ();
579 copy_target_description (tdesc, parent_proc->tdesc);
580 child_proc->tdesc = tdesc;
581
582 /* Clone arch-specific process data. */
583 if (the_low_target.new_fork != NULL)
584 the_low_target.new_fork (parent_proc, child_proc);
585
586 /* Save fork info in the parent thread. */
587 if (event == PTRACE_EVENT_FORK)
588 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
589 else if (event == PTRACE_EVENT_VFORK)
590 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
591
592 event_lwp->waitstatus.value.related_pid = ptid;
593
594 /* The status_pending field contains bits denoting the
595 extended event, so when the pending event is handled,
596 the handler will look at lwp->waitstatus. */
597 event_lwp->status_pending_p = 1;
598 event_lwp->status_pending = wstat;
599
600 /* Link the threads until the parent event is passed on to
601 higher layers. */
602 event_lwp->fork_relative = child_lwp;
603 child_lwp->fork_relative = event_lwp;
604
605 /* If the parent thread is doing step-over with single-step
606 breakpoints, the list of single-step breakpoints is cloned
607 from the parent's. Remove them from the child process.
608 In case of vfork, we'll reinsert them once the vforked
609 child is done. */
610 if (event_lwp->bp_reinsert != 0
611 && can_software_single_step ())
612 {
613 /* The child process is forked and stopped, so it is safe
614 to access its memory without stopping all other threads
615 from other processes. */
616 delete_single_step_breakpoints (child_thr);
617
618 gdb_assert (has_single_step_breakpoints (event_thr));
619 gdb_assert (!has_single_step_breakpoints (child_thr));
620 }
621
622 /* Report the event. */
623 return 0;
624 }
625
626 if (debug_threads)
627 debug_printf ("HEW: Got clone event "
628 "from LWP %ld, new child is LWP %ld\n",
629 lwpid_of (event_thr), new_pid);
630
631 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
632 new_lwp = add_lwp (ptid);
633
634 /* Either we're going to immediately resume the new thread
635 or leave it stopped. linux_resume_one_lwp is a nop if it
636 thinks the thread is currently running, so set this first
637 before calling linux_resume_one_lwp. */
638 new_lwp->stopped = 1;
639
640 /* If we're suspending all threads, leave this one suspended
641 too. If the fork/clone parent is stepping over a breakpoint,
642 all other threads have been suspended already. Leave the
643 child suspended too. */
644 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
645 || event_lwp->bp_reinsert != 0)
646 new_lwp->suspended = 1;
647
648 /* Normally we will get the pending SIGSTOP. But in some cases
649 we might get another signal delivered to the group first.
650 If we do get another signal, be sure not to lose it. */
651 if (WSTOPSIG (status) != SIGSTOP)
652 {
653 new_lwp->stop_expected = 1;
654 new_lwp->status_pending_p = 1;
655 new_lwp->status_pending = status;
656 }
657 else if (report_thread_events)
658 {
659 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
660 new_lwp->status_pending_p = 1;
661 new_lwp->status_pending = status;
662 }
663
664 thread_db_notice_clone (event_thr, ptid);
665
666 /* Don't report the event. */
667 return 1;
668 }
669 else if (event == PTRACE_EVENT_VFORK_DONE)
670 {
671 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
672
673 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
674 {
675 reinsert_single_step_breakpoints (event_thr);
676
677 gdb_assert (has_single_step_breakpoints (event_thr));
678 }
679
680 /* Report the event. */
681 return 0;
682 }
683 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
684 {
685 struct process_info *proc;
686 std::vector<int> syscalls_to_catch;
687 ptid_t event_ptid;
688 pid_t event_pid;
689
690 if (debug_threads)
691 {
692 debug_printf ("HEW: Got exec event from LWP %ld\n",
693 lwpid_of (event_thr));
694 }
695
696 /* Get the event ptid. */
697 event_ptid = ptid_of (event_thr);
698 event_pid = ptid_get_pid (event_ptid);
699
700 /* Save the syscall list from the execing process. */
701 proc = get_thread_process (event_thr);
702 syscalls_to_catch = std::move (proc->syscalls_to_catch);
703
704 /* Delete the execing process and all its threads. */
705 linux_mourn (proc);
706 current_thread = NULL;
707
708 /* Create a new process/lwp/thread. */
709 proc = linux_add_process (event_pid, 0);
710 event_lwp = add_lwp (event_ptid);
711 event_thr = get_lwp_thread (event_lwp);
712 gdb_assert (current_thread == event_thr);
713 linux_arch_setup_thread (event_thr);
714
715 /* Set the event status. */
716 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
717 event_lwp->waitstatus.value.execd_pathname
718 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
719
720 /* Mark the exec status as pending. */
721 event_lwp->stopped = 1;
722 event_lwp->status_pending_p = 1;
723 event_lwp->status_pending = wstat;
724 event_thr->last_resume_kind = resume_continue;
725 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
726
727 /* Update syscall state in the new lwp, effectively mid-syscall too. */
728 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
729
730 /* Restore the list to catch. Don't rely on the client, which is free
731 to avoid sending a new list when the architecture doesn't change.
732 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
733 proc->syscalls_to_catch = std::move (syscalls_to_catch);
734
735 /* Report the event. */
736 *orig_event_lwp = event_lwp;
737 return 0;
738 }
739
740 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
741}
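/* Illustrative sketch: the extended events handled above are only
   reported if the matching ptrace options were set on the tracee,
   e.g. something like:

     ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
				 | PTRACE_O_TRACECLONE
				 | PTRACE_O_TRACEEXEC));

   In this file that happens via linux_enable_event_reporting, with
   the options computed by linux_low_ptrace_options.  */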
742
743/* Return the PC as read from the regcache of LWP, without any
744 adjustment. */
745
746static CORE_ADDR
747get_pc (struct lwp_info *lwp)
748{
749 struct thread_info *saved_thread;
750 struct regcache *regcache;
751 CORE_ADDR pc;
752
753 if (the_low_target.get_pc == NULL)
754 return 0;
755
756 saved_thread = current_thread;
757 current_thread = get_lwp_thread (lwp);
758
759 regcache = get_thread_regcache (current_thread, 1);
760 pc = (*the_low_target.get_pc) (regcache);
761
762 if (debug_threads)
763 debug_printf ("pc is 0x%lx\n", (long) pc);
764
765 current_thread = saved_thread;
766 return pc;
767}
768
769/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
770 Fill *SYSNO with the syscall nr trapped. */
771
772static void
773get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
774{
775 struct thread_info *saved_thread;
776 struct regcache *regcache;
777
778 if (the_low_target.get_syscall_trapinfo == NULL)
779 {
780 /* If we cannot get the syscall trapinfo, report an unknown
781 system call number. */
782 *sysno = UNKNOWN_SYSCALL;
783 return;
784 }
785
786 saved_thread = current_thread;
787 current_thread = get_lwp_thread (lwp);
788
789 regcache = get_thread_regcache (current_thread, 1);
790 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
791
792 if (debug_threads)
793 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
794
795 current_thread = saved_thread;
796}
797
798static int check_stopped_by_watchpoint (struct lwp_info *child);
799
800/* Called when the LWP stopped for a signal/trap. If it stopped for a
801 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
802 and save the result in the LWP's stop_reason field. If it stopped
803 for a breakpoint, decrement the PC if necessary on the lwp's
804 architecture. Returns true if we now have the LWP's stop PC. */
805
806static int
807save_stop_reason (struct lwp_info *lwp)
808{
809 CORE_ADDR pc;
810 CORE_ADDR sw_breakpoint_pc;
811 struct thread_info *saved_thread;
812#if USE_SIGTRAP_SIGINFO
813 siginfo_t siginfo;
814#endif
815
816 if (the_low_target.get_pc == NULL)
817 return 0;
818
819 pc = get_pc (lwp);
820 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
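  /* For example, on x86 decr_pc_after_break is 1: the int3 trap
     reports a PC one byte past the breakpoint address, so backing up
     recovers the breakpoint address.  On architectures that do not
     advance the PC past the trap, it is 0.  */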
821
822 /* breakpoint_at reads from the current thread. */
823 saved_thread = current_thread;
824 current_thread = get_lwp_thread (lwp);
825
826#if USE_SIGTRAP_SIGINFO
827 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
828 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
829 {
830 if (siginfo.si_signo == SIGTRAP)
831 {
832 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
833 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
834 {
835 /* The si_code is ambiguous on this arch -- check debug
836 registers. */
837 if (!check_stopped_by_watchpoint (lwp))
838 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
839 }
840 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
841 {
842 /* If we determine the LWP stopped for a SW breakpoint,
843 trust it. Particularly don't check watchpoint
844 registers, because at least on s390, we'd find
845 stopped-by-watchpoint as long as there's a watchpoint
846 set. */
847 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
848 }
849 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
850 {
851 /* This can indicate either a hardware breakpoint or
852 hardware watchpoint. Check debug registers. */
853 if (!check_stopped_by_watchpoint (lwp))
854 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
855 }
856 else if (siginfo.si_code == TRAP_TRACE)
857 {
858 /* We may have single stepped an instruction that
859 triggered a watchpoint. In that case, on some
860 architectures (such as x86), instead of TRAP_HWBKPT,
861 si_code indicates TRAP_TRACE, and we need to check
862 the debug registers separately. */
863 if (!check_stopped_by_watchpoint (lwp))
864 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
865 }
866 }
867 }
868#else
869 /* We may have just stepped a breakpoint instruction. E.g., in
870 non-stop mode, GDB first tells the thread A to step a range, and
871 then the user inserts a breakpoint inside the range. In that
872 case we need to report the breakpoint PC. */
873 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
874 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
875 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
876
877 if (hardware_breakpoint_inserted_here (pc))
878 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
879
880 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
881 check_stopped_by_watchpoint (lwp);
882#endif
883
884 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
885 {
886 if (debug_threads)
887 {
888 struct thread_info *thr = get_lwp_thread (lwp);
889
890 debug_printf ("CSBB: %s stopped by software breakpoint\n",
891 target_pid_to_str (ptid_of (thr)));
892 }
893
894 /* Back up the PC if necessary. */
895 if (pc != sw_breakpoint_pc)
896 {
897 struct regcache *regcache
898 = get_thread_regcache (current_thread, 1);
899 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
900 }
901
902 /* Update this so we record the correct stop PC below. */
903 pc = sw_breakpoint_pc;
904 }
905 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
906 {
907 if (debug_threads)
908 {
909 struct thread_info *thr = get_lwp_thread (lwp);
910
911 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
912 target_pid_to_str (ptid_of (thr)));
913 }
914 }
915 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
916 {
917 if (debug_threads)
918 {
919 struct thread_info *thr = get_lwp_thread (lwp);
920
921 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
922 target_pid_to_str (ptid_of (thr)));
923 }
924 }
925 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
926 {
927 if (debug_threads)
928 {
929 struct thread_info *thr = get_lwp_thread (lwp);
930
931 debug_printf ("CSBB: %s stopped by trace\n",
932 target_pid_to_str (ptid_of (thr)));
933 }
934 }
935
936 lwp->stop_pc = pc;
937 current_thread = saved_thread;
938 return 1;
939}
940
941static struct lwp_info *
942add_lwp (ptid_t ptid)
943{
944 struct lwp_info *lwp;
945
946 lwp = XCNEW (struct lwp_info);
947
948 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
949
950 if (the_low_target.new_thread != NULL)
951 the_low_target.new_thread (lwp);
952
953 lwp->thread = add_thread (ptid, lwp);
954
955 return lwp;
956}
957
958/* Callback to be used when calling fork_inferior, responsible for
959 actually initiating the tracing of the inferior. */
960
961static void
962linux_ptrace_fun ()
963{
964 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
965 (PTRACE_TYPE_ARG4) 0) < 0)
966 trace_start_error_with_name ("ptrace");
967
968 if (setpgid (0, 0) < 0)
969 trace_start_error_with_name ("setpgid");
970
971 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
972 stdout to stderr so that inferior i/o doesn't corrupt the connection.
973 Also, redirect stdin to /dev/null. */
974 if (remote_connection_is_stdio ())
975 {
976 if (close (0) < 0)
977 trace_start_error_with_name ("close");
978 if (open ("/dev/null", O_RDONLY) < 0)
979 trace_start_error_with_name ("open");
980 if (dup2 (2, 1) < 0)
981 trace_start_error_with_name ("dup2");
982 if (write (2, "stdin/stdout redirected\n",
983 sizeof ("stdin/stdout redirected\n") - 1) < 0)
984 {
985 /* Errors ignored. */;
986 }
987 }
988}
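/* Sketch of the surrounding protocol: fork_inferior forks, runs the
   callback above in the child (so the child requests tracing with
   PTRACE_TRACEME before exec'ing PROGRAM), then execs.  Because of
   PTRACE_TRACEME, the exec stops the child with a SIGTRAP, which the
   parent collects via post_fork_inferior below.  */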
989
990/* Start an inferior process and return its pid.
991 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
992 are its arguments. */
993
994static int
995linux_create_inferior (const char *program,
996 const std::vector<char *> &program_args)
997{
998 struct lwp_info *new_lwp;
999 int pid;
1000 ptid_t ptid;
1001 struct cleanup *restore_personality
1002 = maybe_disable_address_space_randomization (disable_randomization);
1003 std::string str_program_args = stringify_argv (program_args);
1004
1005 pid = fork_inferior (program,
1006 str_program_args.c_str (),
1007 get_environ ()->envp (), linux_ptrace_fun,
1008 NULL, NULL, NULL, NULL);
1009
1010 do_cleanups (restore_personality);
1011
1012 linux_add_process (pid, 0);
1013
1014 ptid = ptid_build (pid, pid, 0);
1015 new_lwp = add_lwp (ptid);
1016 new_lwp->must_set_ptrace_flags = 1;
1017
1018 post_fork_inferior (pid, program);
1019
1020 return pid;
1021}
1022
1023/* Implement the post_create_inferior target_ops method. */
1024
1025static void
1026linux_post_create_inferior (void)
1027{
1028 struct lwp_info *lwp = get_thread_lwp (current_thread);
1029
1030 linux_arch_setup ();
1031
1032 if (lwp->must_set_ptrace_flags)
1033 {
1034 struct process_info *proc = current_process ();
1035 int options = linux_low_ptrace_options (proc->attached);
1036
1037 linux_enable_event_reporting (lwpid_of (current_thread), options);
1038 lwp->must_set_ptrace_flags = 0;
1039 }
1040}
1041
1042/* Attach to an inferior process. Returns 0 on success, ERRNO on
1043 error. */
1044
1045int
1046linux_attach_lwp (ptid_t ptid)
1047{
1048 struct lwp_info *new_lwp;
1049 int lwpid = ptid_get_lwp (ptid);
1050
1051 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1052 != 0)
1053 return errno;
1054
1055 new_lwp = add_lwp (ptid);
1056
1057 /* We need to wait for SIGSTOP before being able to make the next
1058 ptrace call on this LWP. */
1059 new_lwp->must_set_ptrace_flags = 1;
1060
1061 if (linux_proc_pid_is_stopped (lwpid))
1062 {
1063 if (debug_threads)
1064 debug_printf ("Attached to a stopped process\n");
1065
1066 /* The process is definitely stopped. It is in a job control
1067 stop, unless the kernel predates the TASK_STOPPED /
1068 TASK_TRACED distinction, in which case it might be in a
1069 ptrace stop. Make sure it is in a ptrace stop; from there we
1070 can kill it, signal it, et cetera.
1071
1072 First make sure there is a pending SIGSTOP. Since we are
1073 already attached, the process can not transition from stopped
1074 to running without a PTRACE_CONT; so we know this signal will
1075 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1076 probably already in the queue (unless this kernel is old
1077 enough to use TASK_STOPPED for ptrace stops); but since
1078 SIGSTOP is not an RT signal, it can only be queued once. */
1079 kill_lwp (lwpid, SIGSTOP);
1080
1081 /* Finally, resume the stopped process. This will deliver the
1082 SIGSTOP (or a higher priority signal, just like normal
1083 PTRACE_ATTACH), which we'll catch later on. */
1084 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1085 }
1086
1087 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1088 brings it to a halt.
1089
1090 There are several cases to consider here:
1091
1092 1) gdbserver has already attached to the process and is being notified
1093 of a new thread that is being created.
1094 In this case we should ignore that SIGSTOP and resume the
1095 process. This is handled below by setting stop_expected = 1,
1096 and the fact that add_thread sets last_resume_kind ==
1097 resume_continue.
1098
1099 2) This is the first thread (the process thread), and we're attaching
1100 to it via attach_inferior.
1101 In this case we want the process thread to stop.
1102 This is handled by having linux_attach set last_resume_kind ==
1103 resume_stop after we return.
1104
1105 If the pid we are attaching to is also the tgid, we attach to and
1106 stop all the existing threads. Otherwise, we attach to pid and
1107 ignore any other threads in the same group as this pid.
1108
1109 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1110 existing threads.
1111 In this case we want the thread to stop.
1112 FIXME: This case is currently not properly handled.
1113 We should wait for the SIGSTOP but don't. Things work apparently
1114 because enough time passes between when we ptrace (ATTACH) and when
1115 gdb makes the next ptrace call on the thread.
1116
1117 On the other hand, if we are currently trying to stop all threads, we
1118 should treat the new thread as if we had sent it a SIGSTOP. This works
1119 because we are guaranteed that the add_lwp call above added us to the
1120 end of the list, and so the new thread has not yet reached
1121 wait_for_sigstop (but will). */
1122 new_lwp->stop_expected = 1;
1123
1124 return 0;
1125}
1126
1127/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1128 already attached. Returns true if a new LWP is found, false
1129 otherwise. */
1130
1131static int
1132attach_proc_task_lwp_callback (ptid_t ptid)
1133{
1134 /* Is this a new thread? */
1135 if (find_thread_ptid (ptid) == NULL)
1136 {
1137 int lwpid = ptid_get_lwp (ptid);
1138 int err;
1139
1140 if (debug_threads)
1141 debug_printf ("Found new lwp %d\n", lwpid);
1142
1143 err = linux_attach_lwp (ptid);
1144
1145 /* Be quiet if we simply raced with the thread exiting. EPERM
1146 is returned if the thread's task still exists, and is marked
1147 as exited or zombie, as well as other conditions, so in that
1148 case, confirm the status in /proc/PID/status. */
1149 if (err == ESRCH
1150 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1151 {
1152 if (debug_threads)
1153 {
1154 debug_printf ("Cannot attach to lwp %d: "
1155 "thread is gone (%d: %s)\n",
1156 lwpid, err, strerror (err));
1157 }
1158 }
1159 else if (err != 0)
1160 {
1161 warning (_("Cannot attach to lwp %d: %s"),
1162 lwpid,
1163 linux_ptrace_attach_fail_reason_string (ptid, err));
1164 }
1165
1166 return 1;
1167 }
1168 return 0;
1169}
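/* Note: linux_proc_attach_tgid_threads is expected to rescan
   /proc/PID/task until a full pass finds no new threads, since new
   clones can appear while we are attaching; the non-zero return above
   means "found a new lwp, scan again".  */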
1170
1171static void async_file_mark (void);
1172
1173/* Attach to PID. If PID is the tgid, attach to it and all
1174 of its threads. */
1175
1176static int
1177linux_attach (unsigned long pid)
1178{
1179 struct process_info *proc;
1180 struct thread_info *initial_thread;
1181 ptid_t ptid = ptid_build (pid, pid, 0);
1182 int err;
1183
1184 /* Attach to PID. We will check for other threads
1185 soon. */
1186 err = linux_attach_lwp (ptid);
1187 if (err != 0)
1188 error ("Cannot attach to process %ld: %s",
1189 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1190
1191 proc = linux_add_process (pid, 1);
1192
1193 /* Don't ignore the initial SIGSTOP if we just attached to this
1194 process. It will be collected by wait shortly. */
1195 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1196 initial_thread->last_resume_kind = resume_stop;
1197
1198 /* We must attach to every LWP. If /proc is mounted, use that to
1199 find them now. On the one hand, the inferior may be using raw
1200 clone instead of using pthreads. On the other hand, even if it
1201 is using pthreads, GDB may not be connected yet (thread_db needs
1202 to do symbol lookups, through qSymbol). Also, thread_db walks
1203 structures in the inferior's address space to find the list of
1204 threads/LWPs, and those structures may well be corrupted. Note
1205 that once thread_db is loaded, we'll still use it to list threads
1206 and associate pthread info with each LWP. */
1207 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1208
1209 /* GDB will shortly read the xml target description for this
1210 process, to figure out the process' architecture. But the target
1211 description is only filled in when the first process/thread in
1212 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1213 that now, otherwise, if GDB is fast enough, it could read the
1214 target description _before_ that initial stop. */
1215 if (non_stop)
1216 {
1217 struct lwp_info *lwp;
1218 int wstat, lwpid;
1219 ptid_t pid_ptid = pid_to_ptid (pid);
1220
1221 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1222 &wstat, __WALL);
1223 gdb_assert (lwpid > 0);
1224
1225 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1226
1227 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1228 {
1229 lwp->status_pending_p = 1;
1230 lwp->status_pending = wstat;
1231 }
1232
1233 initial_thread->last_resume_kind = resume_continue;
1234
1235 async_file_mark ();
1236
1237 gdb_assert (proc->tdesc != NULL);
1238 }
1239
1240 return 0;
1241}
1242
1243struct counter
1244{
1245 int pid;
1246 int count;
1247};
1248
1249static int
1250second_thread_of_pid_p (thread_info *thread, void *args)
1251{
1252 struct counter *counter = (struct counter *) args;
1253
1254 if (thread->id.pid () == counter->pid)
1255 {
1256 if (++counter->count > 1)
1257 return 1;
1258 }
1259
1260 return 0;
1261}
1262
1263static int
1264last_thread_of_process_p (int pid)
1265{
1266 struct counter counter = { pid , 0 };
1267
1268 return (find_inferior (&all_threads,
1269 second_thread_of_pid_p, &counter) == NULL);
1270}
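/* Note: find_inferior returns the first thread for which the callback
   returns non-zero, or NULL if there is none, so a NULL result above
   means no second thread of PID exists.  */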
1271
1272/* Kill LWP. */
1273
1274static void
1275linux_kill_one_lwp (struct lwp_info *lwp)
1276{
1277 struct thread_info *thr = get_lwp_thread (lwp);
1278 int pid = lwpid_of (thr);
1279
1280 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1281 there is no signal context, and ptrace(PTRACE_KILL) (or
1282 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1283 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1284 alternative is to kill with SIGKILL. We only need one SIGKILL
1285 per process, not one for each thread. But since we still support
1286 debugging programs using raw clone without CLONE_THREAD,
1287 we send one for each thread. For years, we used PTRACE_KILL
1288 only, so we're being a bit paranoid about some old kernels where
1289 PTRACE_KILL might work better (dubious if there are any such, but
1290 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1291 second, and so we're fine everywhere. */
1292
1293 errno = 0;
1294 kill_lwp (pid, SIGKILL);
1295 if (debug_threads)
1296 {
1297 int save_errno = errno;
1298
1299 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1300 target_pid_to_str (ptid_of (thr)),
1301 save_errno ? strerror (save_errno) : "OK");
1302 }
1303
1304 errno = 0;
1305 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1306 if (debug_threads)
1307 {
1308 int save_errno = errno;
1309
1310 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1311 target_pid_to_str (ptid_of (thr)),
1312 save_errno ? strerror (save_errno) : "OK");
1313 }
1314}
1315
1316/* Kill LWP and wait for it to die. */
1317
1318static void
1319kill_wait_lwp (struct lwp_info *lwp)
1320{
1321 struct thread_info *thr = get_lwp_thread (lwp);
1322 int pid = ptid_get_pid (ptid_of (thr));
1323 int lwpid = ptid_get_lwp (ptid_of (thr));
1324 int wstat;
1325 int res;
1326
1327 if (debug_threads)
1328 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1329
1330 do
1331 {
1332 linux_kill_one_lwp (lwp);
1333
1334 /* Make sure it died. Notes:
1335
1336 - The loop is most likely unnecessary.
1337
1338 - We don't use linux_wait_for_event as that could delete lwps
1339 while we're iterating over them. We're not interested in
1340 any pending status at this point, only in making sure all
1341 wait status on the kernel side are collected until the
1342 process is reaped.
1343
1344 - We don't use __WALL here as the __WALL emulation relies on
1345 SIGCHLD, and killing a stopped process doesn't generate
1346 one, nor an exit status.
1347 */
1348 res = my_waitpid (lwpid, &wstat, 0);
1349 if (res == -1 && errno == ECHILD)
1350 res = my_waitpid (lwpid, &wstat, __WCLONE);
1351 } while (res > 0 && WIFSTOPPED (wstat));
1352
1353 /* Even if it was stopped, the child may have already disappeared.
1354 E.g., if it was killed by SIGKILL. */
1355 if (res < 0 && errno != ECHILD)
1356 perror_with_name ("kill_wait_lwp");
1357}
1358
1359/* Callback for `find_inferior'. Kills an lwp of a given process,
1360 except the leader. */
1361
1362static int
1363kill_one_lwp_callback (thread_info *thread, void *args)
1364{
1365 struct lwp_info *lwp = get_thread_lwp (thread);
1366 int pid = * (int *) args;
1367
1368 if (thread->id.pid () != pid)
1369 return 0;
1370
1371 /* We avoid killing the first thread here, because of a Linux kernel (at
1372 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1373 the children get a chance to be reaped, it will remain a zombie
1374 forever. */
1375
1376 if (lwpid_of (thread) == pid)
1377 {
1378 if (debug_threads)
1379 debug_printf ("lkop: is last of process %s\n",
1380 target_pid_to_str (thread->id));
1381 return 0;
1382 }
1383
1384 kill_wait_lwp (lwp);
1385 return 0;
1386}
1387
1388static int
1389linux_kill (int pid)
1390{
1391 struct process_info *process;
1392 struct lwp_info *lwp;
1393
1394 process = find_process_pid (pid);
1395 if (process == NULL)
1396 return -1;
1397
1398 /* If we're killing a running inferior, make sure it is stopped
1399 first, as PTRACE_KILL will not work otherwise. */
1400 stop_all_lwps (0, NULL);
1401
1402 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1403
1404 /* See the comment in linux_kill_one_lwp. We did not kill the first
1405 thread in the list, so do so now. */
1406 lwp = find_lwp_pid (pid_to_ptid (pid));
1407
1408 if (lwp == NULL)
1409 {
1410 if (debug_threads)
1411 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1412 pid);
1413 }
1414 else
1415 kill_wait_lwp (lwp);
1416
1417 the_target->mourn (process);
1418
1419 /* Since we presently can only stop all lwps of all processes, we
1420 need to unstop lwps of other processes. */
1421 unstop_all_lwps (0, NULL);
1422 return 0;
1423}
1424
1425/* Get pending signal of THREAD, for detaching purposes. This is the
1426 signal the thread last stopped for, which we need to deliver to the
1427 thread when detaching, otherwise, it'd be suppressed/lost. */
1428
1429static int
1430get_detach_signal (struct thread_info *thread)
1431{
1432 enum gdb_signal signo = GDB_SIGNAL_0;
1433 int status;
1434 struct lwp_info *lp = get_thread_lwp (thread);
1435
1436 if (lp->status_pending_p)
1437 status = lp->status_pending;
1438 else
1439 {
1440 /* If the thread had been suspended by gdbserver, and it stopped
1441 cleanly, then it'll have stopped with SIGSTOP. But we don't
1442 want to deliver that SIGSTOP. */
1443 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1444 || thread->last_status.value.sig == GDB_SIGNAL_0)
1445 return 0;
1446
1447 /* Otherwise, we may need to deliver the signal we
1448 intercepted. */
1449 status = lp->last_status;
1450 }
1451
1452 if (!WIFSTOPPED (status))
1453 {
1454 if (debug_threads)
1455 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1456 target_pid_to_str (ptid_of (thread)));
1457 return 0;
1458 }
1459
1460 /* Extended wait statuses aren't real SIGTRAPs. */
1461 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1462 {
1463 if (debug_threads)
1464 debug_printf ("GPS: lwp %s had stopped with extended "
1465 "status: no pending signal\n",
1466 target_pid_to_str (ptid_of (thread)));
1467 return 0;
1468 }
1469
1470 signo = gdb_signal_from_host (WSTOPSIG (status));
1471
1472 if (program_signals_p && !program_signals[signo])
1473 {
1474 if (debug_threads)
1475 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1476 target_pid_to_str (ptid_of (thread)),
1477 gdb_signal_to_string (signo));
1478 return 0;
1479 }
1480 else if (!program_signals_p
1481 /* If we have no way to know which signals GDB does not
1482 want to have passed to the program, assume
1483 SIGTRAP/SIGINT, which is GDB's default. */
1484 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1485 {
1486 if (debug_threads)
1487 debug_printf ("GPS: lwp %s had signal %s, "
1488 "but we don't know if we should pass it. "
1489 "Default to not.\n",
1490 target_pid_to_str (ptid_of (thread)),
1491 gdb_signal_to_string (signo));
1492 return 0;
1493 }
1494 else
1495 {
1496 if (debug_threads)
1497 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1498 target_pid_to_str (ptid_of (thread)),
1499 gdb_signal_to_string (signo));
1500
1501 return WSTOPSIG (status);
1502 }
1503}
1504
1505/* Detach from LWP. */
1506
1507static void
1508linux_detach_one_lwp (struct lwp_info *lwp)
1509{
1510 struct thread_info *thread = get_lwp_thread (lwp);
1511 int sig;
1512 int lwpid;
1513
1514 /* If there is a pending SIGSTOP, get rid of it. */
1515 if (lwp->stop_expected)
1516 {
1517 if (debug_threads)
1518 debug_printf ("Sending SIGCONT to %s\n",
1519 target_pid_to_str (ptid_of (thread)));
1520
1521 kill_lwp (lwpid_of (thread), SIGCONT);
1522 lwp->stop_expected = 0;
1523 }
1524
1525 /* Pass on any pending signal for this thread. */
1526 sig = get_detach_signal (thread);
1527
1528 /* Preparing to resume may try to write registers, and fail if the
1529 lwp is zombie. If that happens, ignore the error. We'll handle
1530 it below, when detach fails with ESRCH. */
1531 TRY
1532 {
1533 /* Flush any pending changes to the process's registers. */
1534 regcache_invalidate_thread (thread);
1535
1536 /* Finally, let it resume. */
1537 if (the_low_target.prepare_to_resume != NULL)
1538 the_low_target.prepare_to_resume (lwp);
1539 }
1540 CATCH (ex, RETURN_MASK_ERROR)
1541 {
1542 if (!check_ptrace_stopped_lwp_gone (lwp))
1543 throw_exception (ex);
1544 }
1545 END_CATCH
1546
1547 lwpid = lwpid_of (thread);
1548 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1549 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1550 {
1551 int save_errno = errno;
1552
1553 /* We know the thread exists, so ESRCH must mean the lwp is
1554 zombie. This can happen if one of the already-detached
1555 threads exits the whole thread group. In that case we're
1556 still attached, and must reap the lwp. */
1557 if (save_errno == ESRCH)
1558 {
1559 int ret, status;
1560
1561 ret = my_waitpid (lwpid, &status, __WALL);
1562 if (ret == -1)
1563 {
1564 warning (_("Couldn't reap LWP %d while detaching: %s"),
1565 lwpid, strerror (errno));
1566 }
1567 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1568 {
1569 warning (_("Reaping LWP %d while detaching "
1570 "returned unexpected status 0x%x"),
1571 lwpid, status);
1572 }
1573 }
1574 else
1575 {
1576 error (_("Can't detach %s: %s"),
1577 target_pid_to_str (ptid_of (thread)),
1578 strerror (save_errno));
1579 }
1580 }
1581 else if (debug_threads)
1582 {
1583 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1584 target_pid_to_str (ptid_of (thread)),
1585 strsignal (sig));
1586 }
1587
1588 delete_lwp (lwp);
1589}
1590
1591/* Callback for find_inferior. Detaches from non-leader threads of a
1592 given process. */
1593
1594static int
1595linux_detach_lwp_callback (thread_info *thread, void *args)
1596{
1597 struct lwp_info *lwp = get_thread_lwp (thread);
1598 int pid = *(int *) args;
1599 int lwpid = lwpid_of (thread);
1600
1601 /* Skip other processes. */
1602 if (thread->id.pid () != pid)
1603 return 0;
1604
1605 /* We don't actually detach from the thread group leader just yet.
1606 If the thread group exits, we must reap the zombie clone lwps
1607 before we're able to reap the leader. */
1608 if (thread->id.pid () == lwpid)
1609 return 0;
1610
1611 linux_detach_one_lwp (lwp);
1612 return 0;
1613}
1614
1615static int
1616linux_detach (int pid)
1617{
1618 struct process_info *process;
1619 struct lwp_info *main_lwp;
1620
1621 process = find_process_pid (pid);
1622 if (process == NULL)
1623 return -1;
1624
1625 /* If a step over is already in progress, let it finish first;
1626 otherwise nesting a stabilize_threads operation on top gets really
1627 messy. */
1628 complete_ongoing_step_over ();
1629
1630 /* Stop all threads before detaching. First, ptrace requires that
1631 the thread is stopped to successfully detach. Second, thread_db
1632 may need to uninstall thread event breakpoints from memory, which
1633 only works with a stopped process anyway. */
1634 stop_all_lwps (0, NULL);
1635
1636#ifdef USE_THREAD_DB
1637 thread_db_detach (process);
1638#endif
1639
1640 /* Stabilize threads (move out of jump pads). */
1641 stabilize_threads ();
1642
1643 /* Detach from the clone lwps first. If the thread group exits just
1644 while we're detaching, we must reap the clone lwps before we're
1645 able to reap the leader. */
1646 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1647
1648 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1649 linux_detach_one_lwp (main_lwp);
1650
1651 the_target->mourn (process);
1652
1653 /* Since we presently can only stop all lwps of all processes, we
1654 need to unstop lwps of other processes. */
1655 unstop_all_lwps (0, NULL);
1656 return 0;
1657}
1658
1659/* Remove all LWPs that belong to process PROC from the lwp list. */
1660
1661static int
1662delete_lwp_callback (thread_info *thread, void *proc)
1663{
1664 struct lwp_info *lwp = get_thread_lwp (thread);
1665 struct process_info *process = (struct process_info *) proc;
1666
1667 if (pid_of (thread) == pid_of (process))
1668 delete_lwp (lwp);
1669
1670 return 0;
1671}
1672
1673static void
1674linux_mourn (struct process_info *process)
1675{
1676 struct process_info_private *priv;
1677
1678#ifdef USE_THREAD_DB
1679 thread_db_mourn (process);
1680#endif
1681
1682 find_inferior (&all_threads, delete_lwp_callback, process);
1683
1684 /* Free all private data. */
1685 priv = process->priv;
1686 if (the_low_target.delete_process != NULL)
1687 the_low_target.delete_process (priv->arch_private);
1688 else
1689 gdb_assert (priv->arch_private == NULL);
1690 free (priv);
1691 process->priv = NULL;
1692
1693 remove_process (process);
1694}
1695
1696static void
1697linux_join (int pid)
1698{
1699 int status, ret;
1700
1701 do {
1702 ret = my_waitpid (pid, &status, 0);
1703 if (WIFEXITED (status) || WIFSIGNALED (status))
1704 break;
1705 } while (ret != -1 || errno != ECHILD);
1706}
1707
1708/* Return nonzero if the given thread is still alive. */
1709static int
1710linux_thread_alive (ptid_t ptid)
1711{
1712 struct lwp_info *lwp = find_lwp_pid (ptid);
1713
1714 /* We assume we always know if a thread exits. If a whole process
1715 exited but we still haven't been able to report it to GDB, we'll
1716 hold on to the last lwp of the dead process. */
1717 if (lwp != NULL)
1718 return !lwp_is_marked_dead (lwp);
1719 else
1720 return 0;
1721}
1722
1723/* Return 1 if this lwp still has an interesting status pending. If
1724 not (e.g., it had stopped for a breakpoint that is gone), return
1725 0. */
1726
1727static int
1728thread_still_has_status_pending_p (struct thread_info *thread)
1729{
1730 struct lwp_info *lp = get_thread_lwp (thread);
1731
1732 if (!lp->status_pending_p)
1733 return 0;
1734
1735 if (thread->last_resume_kind != resume_stop
1736 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1737 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1738 {
1739 struct thread_info *saved_thread;
1740 CORE_ADDR pc;
1741 int discard = 0;
1742
1743 gdb_assert (lp->last_status != 0);
1744
1745 pc = get_pc (lp);
1746
1747 saved_thread = current_thread;
1748 current_thread = thread;
1749
1750 if (pc != lp->stop_pc)
1751 {
1752 if (debug_threads)
1753 debug_printf ("PC of %ld changed\n",
1754 lwpid_of (thread));
1755 discard = 1;
1756 }
1757
1758#if !USE_SIGTRAP_SIGINFO
1759 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1760 && !(*the_low_target.breakpoint_at) (pc))
1761 {
1762 if (debug_threads)
1763 debug_printf ("previous SW breakpoint of %ld gone\n",
1764 lwpid_of (thread));
1765 discard = 1;
1766 }
1767 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1768 && !hardware_breakpoint_inserted_here (pc))
1769 {
1770 if (debug_threads)
1771 debug_printf ("previous HW breakpoint of %ld gone\n",
1772 lwpid_of (thread));
1773 discard = 1;
1774 }
1775#endif
1776
1777 current_thread = saved_thread;
1778
1779 if (discard)
1780 {
1781 if (debug_threads)
1782 debug_printf ("discarding pending breakpoint status\n");
1783 lp->status_pending_p = 0;
1784 return 0;
1785 }
1786 }
1787
1788 return 1;
1789}
1790
1791/* Returns true if LWP is resumed from the client's perspective. */
1792
1793static int
1794lwp_resumed (struct lwp_info *lwp)
1795{
1796 struct thread_info *thread = get_lwp_thread (lwp);
1797
1798 if (thread->last_resume_kind != resume_stop)
1799 return 1;
1800
1801 /* Did gdb send us a `vCont;t', but we haven't reported the
1802 corresponding stop to gdb yet? If so, the thread is still
1803 resumed/running from gdb's perspective. */
1804 if (thread->last_resume_kind == resume_stop
1805 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1806 return 1;
1807
1808 return 0;
1809}
1810
1811/* Return 1 if this lwp has an interesting status pending. */
1812static int
1813status_pending_p_callback (thread_info *thread, void *arg)
1814{
1815 struct lwp_info *lp = get_thread_lwp (thread);
1816 ptid_t ptid = * (ptid_t *) arg;
1817
1818 /* Check if we're only interested in events from a specific process
1819 or a specific LWP. */
1820 if (!ptid_match (ptid_of (thread), ptid))
1821 return 0;
1822
1823 if (!lwp_resumed (lp))
1824 return 0;
1825
1826 if (lp->status_pending_p
1827 && !thread_still_has_status_pending_p (thread))
1828 {
1829 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1830 return 0;
1831 }
1832
1833 return lp->status_pending_p;
1834}
1835
1836static int
1837same_lwp (thread_info *thread, void *data)
1838{
1839 ptid_t ptid = *(ptid_t *) data;
1840 int lwp;
1841
1842 if (ptid_get_lwp (ptid) != 0)
1843 lwp = ptid_get_lwp (ptid);
1844 else
1845 lwp = ptid_get_pid (ptid);
1846
1847 if (thread->id.lwp () == lwp)
1848 return 1;
1849
1850 return 0;
1851}
1852
1853struct lwp_info *
1854find_lwp_pid (ptid_t ptid)
1855{
1856 thread_info *thread = find_inferior (&all_threads, same_lwp, &ptid);
1857
1858 if (thread == NULL)
1859 return NULL;
1860
1861 return get_thread_lwp (thread);
1862}
1863
1864/* Return the number of known LWPs in the tgid given by PID. */
1865
1866static int
1867num_lwps (int pid)
1868{
1869 int count = 0;
1870
1871 for_each_thread (pid, [&] (thread_info *thread)
1872 {
1873 count++;
1874 });
1875
1876 return count;
1877}
1878
1879/* See nat/linux-nat.h. */
1880
1881struct lwp_info *
1882iterate_over_lwps (ptid_t filter,
1883 iterate_over_lwps_ftype callback,
1884 void *data)
1885{
1886 thread_info *thread = find_thread (filter, [&] (thread_info *thread)
1887 {
1888 lwp_info *lwp = get_thread_lwp (thread);
1889
1890 return callback (lwp, data);
1891 });
1892
1893 if (thread == NULL)
1894 return NULL;
1895
1896 return get_thread_lwp (thread);
1897}
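/* Example usage (illustrative sketch): find the first stopped LWP of
   process PID:

     static int
     is_stopped_lwp (struct lwp_info *lwp, void *arg)
     {
       return lwp->stopped;	// Non-zero ends the iteration.
     }

     struct lwp_info *lp
       = iterate_over_lwps (pid_to_ptid (pid), is_stopped_lwp, NULL);

   A non-zero return from CALLBACK ends the walk and makes
   iterate_over_lwps return that LWP; if every call returns 0, the
   result is NULL.  */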
1898
1899/* Detect zombie thread group leaders, and "exit" them. We can't reap
1900 their exits until all other threads in the group have exited. */
1901
1902static void
1903check_zombie_leaders (void)
1904{
1905 for_each_process ([] (process_info *proc) {
1906 pid_t leader_pid = pid_of (proc);
1907 struct lwp_info *leader_lp;
1908
1909 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1910
1911 if (debug_threads)
1912 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1913 "num_lwps=%d, zombie=%d\n",
1914 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1915 linux_proc_pid_is_zombie (leader_pid));
1916
1917 if (leader_lp != NULL && !leader_lp->stopped
1918 /* Check if there are other threads in the group, as we may
1919 have raced with the inferior simply exiting. */
1920 && !last_thread_of_process_p (leader_pid)
1921 && linux_proc_pid_is_zombie (leader_pid))
1922 {
1923 /* A leader zombie can mean one of two things:
1924
1925 - It exited, and there's an exit status pending
1926 available, or only the leader exited (not the whole
1927 program). In the latter case, we can't waitpid the
1928 leader's exit status until all other threads are gone.
1929
1930 - There are 3 or more threads in the group, and a thread
1931 other than the leader exec'd. On an exec, the Linux
1932 kernel destroys all other threads (except the execing
1933 one) in the thread group, and resets the execing thread's
1934 tid to the tgid. No exit notification is sent for the
1935 execing thread -- from the ptracer's perspective, it
1936 appears as though the execing thread just vanishes.
1937 Until we reap all other threads except the leader and the
1938 execing thread, the leader will be zombie, and the
1939 execing thread will be in `D (disc sleep)'. As soon as
1940 all other threads are reaped, the execing thread changes
1941 its tid to the tgid, and the previous (zombie) leader
1942 vanishes, giving place to the "new" leader. We could try
1943 distinguishing the exit and exec cases, by waiting once
1944 more, and seeing if something comes out, but it doesn't
1945 sound useful. The previous leader _does_ go away, and
1946 we'll re-add the new one once we see the exec event
1947 (which is just the same as what would happen if the
1948 previous leader did exit voluntarily before some other
1949 thread execs). */
1950
1951 if (debug_threads)
1952 debug_printf ("CZL: Thread group leader %d zombie "
1953 "(it exited, or another thread execd).\n",
1954 leader_pid);
1955
1956 delete_lwp (leader_lp);
1957 }
1958 });
1959}
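
/* Sketch (not from the original source): the zombie test above boils
   down to reading the "State:" field of /proc/PID/status, roughly as
   below. linux_proc_pid_is_zombie wraps the real, more careful
   version of this check. */
#if 0
static int
pid_state_is_zombie (pid_t pid)
{
  char path[64], line[128];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The state letter is 'Z' for zombies, e.g. "State: Z (zombie)". */
	zombie = (strchr (line, 'Z') != NULL);
	break;
      }
  fclose (f);
  return zombie;
}
#endif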
1960
1961/* Callback for `find_inferior'. Returns the first LWP that is not
1962 stopped. ARG is a PTID filter. */
1963
1964static int
1965not_stopped_callback (thread_info *thread, void *arg)
1966{
1967 struct lwp_info *lwp;
1968 ptid_t filter = *(ptid_t *) arg;
1969
1970 if (!ptid_match (ptid_of (thread), filter))
1971 return 0;
1972
1973 lwp = get_thread_lwp (thread);
1974 if (!lwp->stopped)
1975 return 1;
1976
1977 return 0;
1978}
1979
1980/* Increment LWP's suspend count. */
1981
1982static void
1983lwp_suspended_inc (struct lwp_info *lwp)
1984{
1985 lwp->suspended++;
1986
1987 if (debug_threads && lwp->suspended > 4)
1988 {
1989 struct thread_info *thread = get_lwp_thread (lwp);
1990
1991 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1992 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1993 }
1994}
1995
1996/* Decrement LWP's suspend count. */
1997
1998static void
1999lwp_suspended_decr (struct lwp_info *lwp)
2000{
2001 lwp->suspended--;
2002
2003 if (lwp->suspended < 0)
2004 {
2005 struct thread_info *thread = get_lwp_thread (lwp);
2006
2007 internal_error (__FILE__, __LINE__,
2008 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2009 lwp->suspended);
2010 }
2011}
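
/* The intended usage pattern of the pair above, in isolation (sketch,
   not from the original source): */
#if 0
lwp_suspended_inc (lwp);	/* Keep LWP paused across a global unpause.  */
/* ... work that may stop and unstop all threads ... */
lwp_suspended_decr (lwp);	/* Underflow triggers an internal_error.  */
#endif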
2012
2013/* This function should only be called if the LWP got a SIGTRAP.
2014
2015 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2016 event was handled, 0 otherwise. */
2017
2018static int
2019handle_tracepoints (struct lwp_info *lwp)
2020{
2021 struct thread_info *tinfo = get_lwp_thread (lwp);
2022 int tpoint_related_event = 0;
2023
2024 gdb_assert (lwp->suspended == 0);
2025
2026 /* If this tracepoint hit causes a tracing stop, we'll immediately
2027 uninsert tracepoints. To do this, we temporarily pause all
2028 threads, unpatch away, and then unpause threads. We need to make
2029 sure the unpausing doesn't resume LWP too. */
2030 lwp_suspended_inc (lwp);
2031
2032 /* And we need to be sure that any all-threads-stopping doesn't try
2033 to move threads out of the jump pads, as it could deadlock the
2034 inferior (LWP could be in the jump pad, maybe even holding the
2035 lock). */
2036
2037 /* Do any necessary step collect actions. */
2038 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2039
2040 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2041
2042 /* See if we just hit a tracepoint and do its main collect
2043 actions. */
2044 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2045
2046 lwp_suspended_decr (lwp);
2047
2048 gdb_assert (lwp->suspended == 0);
2049 gdb_assert (!stabilizing_threads
2050 || (lwp->collecting_fast_tracepoint
2051 != fast_tpoint_collect_result::not_collecting));
2052
2053 if (tpoint_related_event)
2054 {
2055 if (debug_threads)
2056 debug_printf ("got a tracepoint event\n");
2057 return 1;
2058 }
2059
2060 return 0;
2061}
2062
2063/* Convenience wrapper. Returns information about LWP's fast tracepoint
2064 collection status. */
2065
2066static fast_tpoint_collect_result
2067linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2068 struct fast_tpoint_collect_status *status)
2069{
2070 CORE_ADDR thread_area;
2071 struct thread_info *thread = get_lwp_thread (lwp);
2072
2073 if (the_low_target.get_thread_area == NULL)
2074 return fast_tpoint_collect_result::not_collecting;
2075
2076 /* Get the thread area address. This is used to recognize which
2077 thread is which when tracing with the in-process agent library.
2078 We don't read anything from the address, and treat it as opaque;
2079 it's the address itself that we assume is unique per-thread. */
2080 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2081 return fast_tpoint_collect_result::not_collecting;
2082
2083 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2084}
2085
2086/* The reason we resume in the caller is that we want to be able
2087 to pass lwp->status_pending as WSTAT, and we need to clear
2088 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2089 refuses to resume. */
2090
2091static int
2092maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2093{
2094 struct thread_info *saved_thread;
2095
2096 saved_thread = current_thread;
2097 current_thread = get_lwp_thread (lwp);
2098
2099 if ((wstat == NULL
2100 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2101 && supports_fast_tracepoints ()
2102 && agent_loaded_p ())
2103 {
2104 struct fast_tpoint_collect_status status;
2105
2106 if (debug_threads)
2107 debug_printf ("Checking whether LWP %ld needs to move out of the "
2108 "jump pad.\n",
2109 lwpid_of (current_thread));
2110
2111 fast_tpoint_collect_result r
2112 = linux_fast_tracepoint_collecting (lwp, &status);
2113
2114 if (wstat == NULL
2115 || (WSTOPSIG (*wstat) != SIGILL
2116 && WSTOPSIG (*wstat) != SIGFPE
2117 && WSTOPSIG (*wstat) != SIGSEGV
2118 && WSTOPSIG (*wstat) != SIGBUS))
2119 {
2120 lwp->collecting_fast_tracepoint = r;
2121
2122 if (r != fast_tpoint_collect_result::not_collecting)
2123 {
2124 if (r == fast_tpoint_collect_result::before_insn
2125 && lwp->exit_jump_pad_bkpt == NULL)
2126 {
2127 /* Haven't executed the original instruction yet.
2128 Set breakpoint there, and wait till it's hit,
2129 then single-step until exiting the jump pad. */
2130 lwp->exit_jump_pad_bkpt
2131 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2132 }
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of "
2136 "the jump pad...it does\n",
2137 lwpid_of (current_thread));
2138 current_thread = saved_thread;
2139
2140 return 1;
2141 }
2142 }
2143 else
2144 {
2145 /* If we get a synchronous signal while collecting, *and*
2146 while executing the (relocated) original instruction,
2147 reset the PC to point at the tpoint address, before
2148 reporting to GDB. Otherwise, it's an IPA lib bug: just
2149 report the signal to GDB, and pray for the best. */
2150
2151 lwp->collecting_fast_tracepoint
2152 = fast_tpoint_collect_result::not_collecting;
2153
2154 if (r != fast_tpoint_collect_result::not_collecting
2155 && (status.adjusted_insn_addr <= lwp->stop_pc
2156 && lwp->stop_pc < status.adjusted_insn_addr_end))
2157 {
2158 siginfo_t info;
2159 struct regcache *regcache;
2160
2161 /* The si_addr on a few signals references the address
2162 of the faulting instruction. Adjust that as
2163 well. */
2164 if ((WSTOPSIG (*wstat) == SIGILL
2165 || WSTOPSIG (*wstat) == SIGFPE
2166 || WSTOPSIG (*wstat) == SIGBUS
2167 || WSTOPSIG (*wstat) == SIGSEGV)
2168 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2169 (PTRACE_TYPE_ARG3) 0, &info) == 0
2170 /* Final check just to make sure we don't clobber
2171 the siginfo of non-kernel-sent signals. */
2172 && (uintptr_t) info.si_addr == lwp->stop_pc)
2173 {
2174 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2175 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2176 (PTRACE_TYPE_ARG3) 0, &info);
2177 }
2178
2179 regcache = get_thread_regcache (current_thread, 1);
2180 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2181 lwp->stop_pc = status.tpoint_addr;
2182
2183 /* Cancel any fast tracepoint lock this thread was
2184 holding. */
2185 force_unlock_trace_buffer ();
2186 }
2187
2188 if (lwp->exit_jump_pad_bkpt != NULL)
2189 {
2190 if (debug_threads)
2191 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2192 "stopping all threads momentarily.\n");
2193
2194 stop_all_lwps (1, lwp);
2195
2196 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2197 lwp->exit_jump_pad_bkpt = NULL;
2198
2199 unstop_all_lwps (1, lwp);
2200
2201 gdb_assert (lwp->suspended >= 0);
2202 }
2203 }
2204 }
2205
2206 if (debug_threads)
2207 debug_printf ("Checking whether LWP %ld needs to move out of the "
2208 "jump pad...no\n",
2209 lwpid_of (current_thread));
2210
2211 current_thread = saved_thread;
2212 return 0;
2213}
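
/* The siginfo adjustment above, reduced to its core (sketch, not from
   the original source; PID, RELOCATED_PC and ORIGINAL_PC are
   hypothetical placeholders): */
#if 0
siginfo_t info;

if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &info) == 0
    && (uintptr_t) info.si_addr == relocated_pc)
  {
    /* Make the fault address point back at the tracepoint site.  */
    info.si_addr = (void *) (uintptr_t) original_pc;
    ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &info);
  }
#endif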
2214
2215/* Enqueue one signal in the "signals to report later when out of the
2216 jump pad" list. */
2217
2218static void
2219enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2220{
2221 struct pending_signals *p_sig;
2222 struct thread_info *thread = get_lwp_thread (lwp);
2223
2224 if (debug_threads)
2225 debug_printf ("Deferring signal %d for LWP %ld.\n",
2226 WSTOPSIG (*wstat), lwpid_of (thread));
2227
2228 if (debug_threads)
2229 {
2230 struct pending_signals *sig;
2231
2232 for (sig = lwp->pending_signals_to_report;
2233 sig != NULL;
2234 sig = sig->prev)
2235 debug_printf (" Already queued %d\n",
2236 sig->signal);
2237
2238 debug_printf (" (no more currently queued signals)\n");
2239 }
2240
2241 /* Don't enqueue non-RT signals if they are already in the deferred
2242 queue. (SIGSTOP is the easiest signal to see ending up here
2243 twice.) */
2244 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2245 {
2246 struct pending_signals *sig;
2247
2248 for (sig = lwp->pending_signals_to_report;
2249 sig != NULL;
2250 sig = sig->prev)
2251 {
2252 if (sig->signal == WSTOPSIG (*wstat))
2253 {
2254 if (debug_threads)
2255 debug_printf ("Not requeuing already queued non-RT signal %d"
2256 " for LWP %ld\n",
2257 sig->signal,
2258 lwpid_of (thread));
2259 return;
2260 }
2261 }
2262 }
2263
2264 p_sig = XCNEW (struct pending_signals);
2265 p_sig->prev = lwp->pending_signals_to_report;
2266 p_sig->signal = WSTOPSIG (*wstat);
2267
2268 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2269 &p_sig->info);
2270
2271 lwp->pending_signals_to_report = p_sig;
2272}
2273
2274/* Dequeue one signal from the "signals to report later when out of
2275 the jump pad" list. */
2276
2277static int
2278dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2279{
2280 struct thread_info *thread = get_lwp_thread (lwp);
2281
2282 if (lwp->pending_signals_to_report != NULL)
2283 {
2284 struct pending_signals **p_sig;
2285
2286 p_sig = &lwp->pending_signals_to_report;
2287 while ((*p_sig)->prev != NULL)
2288 p_sig = &(*p_sig)->prev;
2289
2290 *wstat = W_STOPCODE ((*p_sig)->signal);
2291 if ((*p_sig)->info.si_signo != 0)
2292 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2293 &(*p_sig)->info);
2294 free (*p_sig);
2295 *p_sig = NULL;
2296
2297 if (debug_threads)
2298 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2299 WSTOPSIG (*wstat), lwpid_of (thread));
2300
2301 if (debug_threads)
2302 {
2303 struct pending_signals *sig;
2304
2305 for (sig = lwp->pending_signals_to_report;
2306 sig != NULL;
2307 sig = sig->prev)
2308 debug_printf (" Still queued %d\n",
2309 sig->signal);
2310
2311 debug_printf (" (no more queued signals)\n");
2312 }
2313
2314 return 1;
2315 }
2316
2317 return 0;
2318}
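
/* The wait-status encoding the queue above relies on (sketch, not
   from the original source): W_STOPCODE builds a status that the
   standard WIFSTOPPED/WSTOPSIG macros decode back. */
#if 0
int wstat = W_STOPCODE (SIGUSR1);

gdb_assert (WIFSTOPPED (wstat));
gdb_assert (WSTOPSIG (wstat) == SIGUSR1);
#endif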
2319
2320/* Fetch the possibly triggered data watchpoint info and store it in
2321 CHILD.
2322
2323 On some archs, like x86, that use debug registers to set
2324 watchpoints, the way to know which watched address trapped is to
2325 check the register that is used to select which address to watch.
2326 The problem is that, between setting the watchpoint
2327 and reading back which data address trapped, the user may change
2328 the set of watchpoints, and, as a consequence, GDB changes the
2329 debug registers in the inferior. To avoid reading back a stale
2330 stopped-data-address when that happens, we cache in LP the fact
2331 that a watchpoint trapped, and the corresponding data address, as
2332 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2333 registers meanwhile, we have the cached data we can rely on. */
2334
2335static int
2336check_stopped_by_watchpoint (struct lwp_info *child)
2337{
2338 if (the_low_target.stopped_by_watchpoint != NULL)
2339 {
2340 struct thread_info *saved_thread;
2341
2342 saved_thread = current_thread;
2343 current_thread = get_lwp_thread (child);
2344
2345 if (the_low_target.stopped_by_watchpoint ())
2346 {
2347 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2348
2349 if (the_low_target.stopped_data_address != NULL)
2350 child->stopped_data_address
2351 = the_low_target.stopped_data_address ();
2352 else
2353 child->stopped_data_address = 0;
2354 }
2355
2356 current_thread = saved_thread;
2357 }
2358
2359 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2360}
2361
2362/* Return the ptrace options that we want to try to enable. */
2363
2364static int
2365linux_low_ptrace_options (int attached)
2366{
2367 int options = 0;
2368
2369 if (!attached)
2370 options |= PTRACE_O_EXITKILL;
2371
2372 if (report_fork_events)
2373 options |= PTRACE_O_TRACEFORK;
2374
2375 if (report_vfork_events)
2376 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2377
2378 if (report_exec_events)
2379 options |= PTRACE_O_TRACEEXEC;
2380
2381 options |= PTRACE_O_TRACESYSGOOD;
2382
2383 return options;
2384}
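
/* Sketch of how the mask computed above reaches the kernel; the real
   call site is linux_enable_event_reporting, this only shows the
   shape of the underlying ptrace request (PID is a hypothetical LWP
   id): */
#if 0
int options = linux_low_ptrace_options (/* attached= */ 0);

ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	(PTRACE_TYPE_ARG4) (uintptr_t) options);
#endif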
2385
2386/* Do low-level handling of the event, and check if we should go on
2387 and pass it to caller code. Return the affected lwp if we are, or
2388 NULL otherwise. */
2389
2390static struct lwp_info *
2391linux_low_filter_event (int lwpid, int wstat)
2392{
2393 struct lwp_info *child;
2394 struct thread_info *thread;
2395 int have_stop_pc = 0;
2396
2397 child = find_lwp_pid (pid_to_ptid (lwpid));
2398
2399 /* Check for stop events reported by a process we didn't already
2400 know about - anything not already in our LWP list.
2401
2402 If we're expecting to receive stopped processes after
2403 fork, vfork, and clone events, then we'll just add the
2404 new one to our list and go back to waiting for the event
2405 to be reported - the stopped process might be returned
2406 from waitpid before or after the event is.
2407
2408 But note the case of a non-leader thread exec'ing after the
2409 leader has exited and gone from our lists (because
2410 check_zombie_leaders deleted it). The non-leader thread
2411 changes its tid to the tgid. */
2412
2413 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2414 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2415 {
2416 ptid_t child_ptid;
2417
2418 /* A multi-thread exec after we had seen the leader exiting. */
2419 if (debug_threads)
2420 {
2421 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2422 "after exec.\n", lwpid);
2423 }
2424
2425 child_ptid = ptid_build (lwpid, lwpid, 0);
2426 child = add_lwp (child_ptid);
2427 child->stopped = 1;
2428 current_thread = child->thread;
2429 }
2430
2431 /* If we didn't find a process, one of two things presumably happened:
2432 - A process we started and then detached from has exited. Ignore it.
2433 - A process we are controlling has forked and the new child's stop
2434 was reported to us by the kernel. Save its PID. */
2435 if (child == NULL && WIFSTOPPED (wstat))
2436 {
2437 add_to_pid_list (&stopped_pids, lwpid, wstat);
2438 return NULL;
2439 }
2440 else if (child == NULL)
2441 return NULL;
2442
2443 thread = get_lwp_thread (child);
2444
2445 child->stopped = 1;
2446
2447 child->last_status = wstat;
2448
2449 /* Check if the thread has exited. */
2450 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2451 {
2452 if (debug_threads)
2453 debug_printf ("LLFE: %d exited.\n", lwpid);
2454
2455 if (finish_step_over (child))
2456 {
2457 /* Unsuspend all other LWPs, and set them back running again. */
2458 unsuspend_all_lwps (child);
2459 }
2460
2461 /* If there is at least one more LWP, then the exit signal was
2462 not the end of the debugged application and should be
2463 ignored, unless GDB wants to hear about thread exits. */
2464 if (report_thread_events
2465 || last_thread_of_process_p (pid_of (thread)))
2466 {
2467 /* Events are serialized to the GDB core, so we can't
2468 report this one right now. Leave the status pending for
2469 the next time we're able to report it. */
2470 mark_lwp_dead (child, wstat);
2471 return child;
2472 }
2473 else
2474 {
2475 delete_lwp (child);
2476 return NULL;
2477 }
2478 }
2479
2480 gdb_assert (WIFSTOPPED (wstat));
2481
2482 if (WIFSTOPPED (wstat))
2483 {
2484 struct process_info *proc;
2485
2486 /* Architecture-specific setup after inferior is running. */
2487 proc = find_process_pid (pid_of (thread));
2488 if (proc->tdesc == NULL)
2489 {
2490 if (proc->attached)
2491 {
2492 /* This needs to happen after we have attached to the
2493 inferior and it is stopped for the first time, but
2494 before we access any inferior registers. */
2495 linux_arch_setup_thread (thread);
2496 }
2497 else
2498 {
2499 /* The process is started, but GDBserver will do
2500 architecture-specific setup after the program stops at
2501 the first instruction. */
2502 child->status_pending_p = 1;
2503 child->status_pending = wstat;
2504 return child;
2505 }
2506 }
2507 }
2508
2509 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2510 {
2511 struct process_info *proc = find_process_pid (pid_of (thread));
2512 int options = linux_low_ptrace_options (proc->attached);
2513
2514 linux_enable_event_reporting (lwpid, options);
2515 child->must_set_ptrace_flags = 0;
2516 }
2517
2518 /* Always update syscall_state, even if it will be filtered later. */
2519 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2520 {
2521 child->syscall_state
2522 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2523 ? TARGET_WAITKIND_SYSCALL_RETURN
2524 : TARGET_WAITKIND_SYSCALL_ENTRY);
2525 }
2526 else
2527 {
2528 /* Almost all other ptrace-stops are known to be outside of system
2529 calls, with further exceptions in handle_extended_wait. */
2530 child->syscall_state = TARGET_WAITKIND_IGNORE;
2531 }
2532
2533 /* Be careful to not overwrite stop_pc until save_stop_reason is
2534 called. */
2535 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2536 && linux_is_extended_waitstatus (wstat))
2537 {
2538 child->stop_pc = get_pc (child);
2539 if (handle_extended_wait (&child, wstat))
2540 {
2541 /* The event has been handled, so just return without
2542 reporting it. */
2543 return NULL;
2544 }
2545 }
2546
2547 if (linux_wstatus_maybe_breakpoint (wstat))
2548 {
2549 if (save_stop_reason (child))
2550 have_stop_pc = 1;
2551 }
2552
2553 if (!have_stop_pc)
2554 child->stop_pc = get_pc (child);
2555
2556 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2557 && child->stop_expected)
2558 {
2559 if (debug_threads)
2560 debug_printf ("Expected stop.\n");
2561 child->stop_expected = 0;
2562
2563 if (thread->last_resume_kind == resume_stop)
2564 {
2565 /* We want to report the stop to the core. Treat the
2566 SIGSTOP as a normal event. */
2567 if (debug_threads)
2568 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2569 target_pid_to_str (ptid_of (thread)));
2570 }
2571 else if (stopping_threads != NOT_STOPPING_THREADS)
2572 {
2573 /* Stopping threads. We don't want this SIGSTOP to end up
2574 pending. */
2575 if (debug_threads)
2576 debug_printf ("LLW: SIGSTOP caught for %s "
2577 "while stopping threads.\n",
2578 target_pid_to_str (ptid_of (thread)));
2579 return NULL;
2580 }
2581 else
2582 {
2583 /* This is a delayed SIGSTOP. Filter out the event. */
2584 if (debug_threads)
2585 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2586 child->stepping ? "step" : "continue",
2587 target_pid_to_str (ptid_of (thread)));
2588
2589 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2590 return NULL;
2591 }
2592 }
2593
2594 child->status_pending_p = 1;
2595 child->status_pending = wstat;
2596 return child;
2597}
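
/* How the extended-event checks above decode the raw status (sketch;
   the linux_ptrace_get_extended_event helper wraps this shift): the
   PTRACE_EVENT_* code rides in bits 16 and up of the waitpid status,
   above the SIGTRAP byte. */
#if 0
int event = wstat >> 16;	/* e.g. PTRACE_EVENT_EXEC, PTRACE_EVENT_FORK */
#endif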
2598
2599/* Return true if THREAD is doing a hardware single step. */
2600
2601static int
2602maybe_hw_step (struct thread_info *thread)
2603{
2604 if (can_hardware_single_step ())
2605 return 1;
2606 else
2607 {
2608 /* GDBserver must insert single-step breakpoint for software
2609 single step. */
2610 gdb_assert (has_single_step_breakpoints (thread));
2611 return 0;
2612 }
2613}
2614
2615/* Resume LWPs that are currently stopped without any pending status
2616 to report, but are resumed from the core's perspective. */
2617
2618static void
2619resume_stopped_resumed_lwps (thread_info *thread)
2620{
2621 struct lwp_info *lp = get_thread_lwp (thread);
2622
2623 if (lp->stopped
2624 && !lp->suspended
2625 && !lp->status_pending_p
2626 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2627 {
2628 int step = 0;
2629
2630 if (thread->last_resume_kind == resume_step)
2631 step = maybe_hw_step (thread);
2632
2633 if (debug_threads)
2634 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2635 target_pid_to_str (ptid_of (thread)),
2636 paddress (lp->stop_pc),
2637 step);
2638
2639 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2640 }
2641}
2642
2643/* Wait for an event from child(ren) WAIT_PTID, and return any that
2644 match FILTER_PTID (leaving others pending). The PTIDs can be:
2645 minus_one_ptid, to specify any child; a pid PTID, specifying all
2646 lwps of a thread group; or a PTID representing a single lwp. Store
2647 the stop status through the status pointer WSTAT. OPTIONS is
2648 passed to the waitpid call. Return 0 if no event was found and
2649 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2650 were found. Return the PID of the stopped child otherwise. */
2651
2652static int
2653linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2654 int *wstatp, int options)
2655{
2656 struct thread_info *event_thread;
2657 struct lwp_info *event_child, *requested_child;
2658 sigset_t block_mask, prev_mask;
2659
2660 retry:
2661 /* N.B. event_thread points to the thread_info struct that contains
2662 event_child. Keep them in sync. */
2663 event_thread = NULL;
2664 event_child = NULL;
2665 requested_child = NULL;
2666
2667 /* Check for a lwp with a pending status. */
2668
2669 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2670 {
2671 event_thread = (struct thread_info *)
2672 find_inferior_in_random (&all_threads, status_pending_p_callback,
2673 &filter_ptid);
2674 if (event_thread != NULL)
2675 event_child = get_thread_lwp (event_thread);
2676 if (debug_threads && event_thread)
2677 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2678 }
2679 else if (!ptid_equal (filter_ptid, null_ptid))
2680 {
2681 requested_child = find_lwp_pid (filter_ptid);
2682
2683 if (stopping_threads == NOT_STOPPING_THREADS
2684 && requested_child->status_pending_p
2685 && (requested_child->collecting_fast_tracepoint
2686 != fast_tpoint_collect_result::not_collecting))
2687 {
2688 enqueue_one_deferred_signal (requested_child,
2689 &requested_child->status_pending);
2690 requested_child->status_pending_p = 0;
2691 requested_child->status_pending = 0;
2692 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2693 }
2694
2695 if (requested_child->suspended
2696 && requested_child->status_pending_p)
2697 {
2698 internal_error (__FILE__, __LINE__,
2699 "requesting an event out of a"
2700 " suspended child?");
2701 }
2702
2703 if (requested_child->status_pending_p)
2704 {
2705 event_child = requested_child;
2706 event_thread = get_lwp_thread (event_child);
2707 }
2708 }
2709
2710 if (event_child != NULL)
2711 {
2712 if (debug_threads)
2713 debug_printf ("Got an event from pending child %ld (%04x)\n",
2714 lwpid_of (event_thread), event_child->status_pending);
2715 *wstatp = event_child->status_pending;
2716 event_child->status_pending_p = 0;
2717 event_child->status_pending = 0;
2718 current_thread = event_thread;
2719 return lwpid_of (event_thread);
2720 }
2721
2722 /* But if we don't find a pending event, we'll have to wait.
2723
2724 We only enter this loop if no process has a pending wait status.
2725 Thus any action taken in response to a wait status inside this
2726 loop is responding as soon as we detect the status, not after any
2727 pending events. */
2728
2729 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2730 all signals while here. */
2731 sigfillset (&block_mask);
2732 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2733
2734 /* Always pull all events out of the kernel. We'll randomly select
2735 an event LWP out of all that have events, to prevent
2736 starvation. */
2737 while (event_child == NULL)
2738 {
2739 pid_t ret = 0;
2740
2741 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2742 quirks:
2743
2744 - If the thread group leader exits while other threads in the
2745 thread group still exist, waitpid(TGID, ...) hangs. That
2746 waitpid won't return an exit status until the other threads
2747 in the group are reaped.
2748
2749 - When a non-leader thread execs, that thread just vanishes
2750 without reporting an exit (so we'd hang if we waited for it
2751 explicitly in that case). The exec event is reported to
2752 the TGID pid. */
2753 errno = 0;
2754 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2755
2756 if (debug_threads)
2757 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2758 ret, errno ? strerror (errno) : "ERRNO-OK");
2759
2760 if (ret > 0)
2761 {
2762 if (debug_threads)
2763 {
2764 debug_printf ("LLW: waitpid %ld received %s\n",
2765 (long) ret, status_to_str (*wstatp));
2766 }
2767
2768 /* Filter all events. IOW, leave all events pending. We'll
2769 randomly select an event LWP out of all that have events
2770 below. */
2771 linux_low_filter_event (ret, *wstatp);
2772 /* Retry until nothing comes out of waitpid. A single
2773 SIGCHLD can indicate more than one child stopped. */
2774 continue;
2775 }
2776
2777 /* Now that we've pulled all events out of the kernel, resume
2778 LWPs that don't have an interesting event to report. */
2779 if (stopping_threads == NOT_STOPPING_THREADS)
2780 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2781
2782 /* ... and find an LWP with a status to report to the core, if
2783 any. */
2784 event_thread = (struct thread_info *)
2785 find_inferior_in_random (&all_threads, status_pending_p_callback,
2786 &filter_ptid);
2787 if (event_thread != NULL)
2788 {
2789 event_child = get_thread_lwp (event_thread);
2790 *wstatp = event_child->status_pending;
2791 event_child->status_pending_p = 0;
2792 event_child->status_pending = 0;
2793 break;
2794 }
2795
2796 /* Check for zombie thread group leaders. Those can't be reaped
2797 until all other threads in the thread group are. */
2798 check_zombie_leaders ();
2799
2800 /* If there are no resumed children left in the set of LWPs we
2801 want to wait for, bail. We can't just block in
2802 waitpid/sigsuspend, because lwps might have been left stopped
2803 in trace-stop state, and we'd be stuck forever waiting for
2804 their status to change (which would only happen if we resumed
2805 them). Even if WNOHANG is set, this return code is preferred
2806 over 0 (below), as it is more detailed. */
2807 if ((find_inferior (&all_threads,
2808 not_stopped_callback,
2809 &wait_ptid) == NULL))
2810 {
2811 if (debug_threads)
2812 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2813 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2814 return -1;
2815 }
2816
2817 /* No interesting event to report to the caller. */
2818 if ((options & WNOHANG))
2819 {
2820 if (debug_threads)
2821 debug_printf ("WNOHANG set, no event found\n");
2822
2823 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2824 return 0;
2825 }
2826
2827 /* Block until we get an event reported with SIGCHLD. */
2828 if (debug_threads)
2829 debug_printf ("sigsuspend'ing\n");
2830
2831 sigsuspend (&prev_mask);
2832 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2833 goto retry;
2834 }
2835
2836 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2837
2838 current_thread = event_thread;
2839
2840 return lwpid_of (event_thread);
2841}
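
/* The overall shape of the loop above (sketch, not from the original
   source; error handling and event selection omitted, and
   handle_one_event / all_children_waited are hypothetical names):
   drain waitpid with WNOHANG, then sleep in sigsuspend until the next
   SIGCHLD. */
#if 0
sigset_t block_mask, prev_mask;

sigfillset (&block_mask);
sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

for (;;)
  {
    int status;
    pid_t pid;

    /* A single SIGCHLD can stand for several stopped children, so
       keep reaping until the kernel has nothing more to report.  */
    while ((pid = waitpid (-1, &status, WNOHANG | __WALL)) > 0)
      handle_one_event (pid, status);

    if (all_children_waited ())
      break;

    /* Atomically unblock SIGCHLD and wait for it.  */
    sigsuspend (&prev_mask);
  }

sigprocmask (SIG_SETMASK, &prev_mask, NULL);
#endif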
2842
2843/* Wait for an event from child(ren) PTID. PTIDs can be:
2844 minus_one_ptid, to specify any child; a pid PTID, specifying all
2845 lwps of a thread group; or a PTID representing a single lwp. Store
2846 the stop status through the status pointer WSTAT. OPTIONS is
2847 passed to the waitpid call. Return 0 if no event was found and
2848 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2849 were found. Return the PID of the stopped child otherwise. */
2850
2851static int
2852linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2853{
2854 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2855}
2856
2857/* Count the LWPs that have had events. */
2858
2859static int
2860count_events_callback (thread_info *thread, void *data)
2861{
2862 struct lwp_info *lp = get_thread_lwp (thread);
2863 int *count = (int *) data;
2864
2865 gdb_assert (count != NULL);
2866
2867 /* Count only resumed LWPs that have an event pending. */
2868 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2869 && lp->status_pending_p)
2870 (*count)++;
2871
2872 return 0;
2873}
2874
2875/* Select the LWP (if any) that is currently being single-stepped. */
2876
2877static int
2878select_singlestep_lwp_callback (thread_info *thread, void *data)
2879{
2880 struct lwp_info *lp = get_thread_lwp (thread);
2881
2882 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2883 && thread->last_resume_kind == resume_step
2884 && lp->status_pending_p)
2885 return 1;
2886 else
2887 return 0;
2888}
2889
2890/* Select the Nth LWP that has had an event. */
2891
2892static int
2893select_event_lwp_callback (thread_info *thread, void *data)
2894{
2895 struct lwp_info *lp = get_thread_lwp (thread);
2896 int *selector = (int *) data;
2897
2898 gdb_assert (selector != NULL);
2899
2900 /* Select only resumed LWPs that have an event pending. */
2901 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2902 && lp->status_pending_p)
2903 if ((*selector)-- == 0)
2904 return 1;
2905
2906 return 0;
2907}
2908
2909/* Select one LWP out of those that have events pending. */
2910
2911static void
2912select_event_lwp (struct lwp_info **orig_lp)
2913{
2914 int num_events = 0;
2915 int random_selector;
2916 struct thread_info *event_thread = NULL;
2917
2918 /* In all-stop, give preference to the LWP that is being
2919 single-stepped. There will be at most one, and it's the LWP that
2920 the core is most interested in. If we didn't do this, then we'd
2921 have to handle pending step SIGTRAPs somehow in case the core
2922 later continues the previously-stepped thread, otherwise we'd
2923 report the pending SIGTRAP, and the core, not having stepped the
2924 thread, wouldn't understand what the trap was for, and therefore
2925 would report it to the user as a random signal. */
2926 if (!non_stop)
2927 {
2928 event_thread
2929 = (struct thread_info *) find_inferior (&all_threads,
2930 select_singlestep_lwp_callback,
2931 NULL);
2932 if (event_thread != NULL)
2933 {
2934 if (debug_threads)
2935 debug_printf ("SEL: Select single-step %s\n",
2936 target_pid_to_str (ptid_of (event_thread)));
2937 }
2938 }
2939 if (event_thread == NULL)
2940 {
2941 /* No single-stepping LWP. Select one at random, out of those
2942 which have had events. */
2943
2944 /* First see how many events we have. */
2945 find_inferior (&all_threads, count_events_callback, &num_events);
2946 gdb_assert (num_events > 0);
2947
2948 /* Now randomly pick a LWP out of those that have had
2949 events. */
2950 random_selector = (int)
2951 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2952
2953 if (debug_threads && num_events > 1)
2954 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2955 num_events, random_selector);
2956
2957 event_thread
2958 = (struct thread_info *) find_inferior (&all_threads,
2959 select_event_lwp_callback,
2960 &random_selector);
2961 }
2962
2963 if (event_thread != NULL)
2964 {
2965 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2966
2967 /* Switch the event LWP. */
2968 *orig_lp = event_lp;
2969 }
2970}
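
/* The unbiased pick used above, in isolation (sketch): scaling by
   RAND_MAX + 1.0 maps rand () onto [0, num_events) without the modulo
   bias of rand () % num_events. With num_events == 3, selector comes
   out 0, 1 or 2 with (nearly) equal probability. */
#if 0
int selector = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
#endif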
2971
2972/* Decrement the suspend count of an LWP. */
2973
2974static int
2975unsuspend_one_lwp (thread_info *thread, void *except)
2976{
2977 struct lwp_info *lwp = get_thread_lwp (thread);
2978
2979 /* Ignore EXCEPT. */
2980 if (lwp == except)
2981 return 0;
2982
2983 lwp_suspended_decr (lwp);
2984 return 0;
2985}
2986
2987/* Decrement the suspend count of all LWPs, except EXCEPT, if
2988 non-NULL. */
2989
2990static void
2991unsuspend_all_lwps (struct lwp_info *except)
2992{
2993 find_inferior (&all_threads, unsuspend_one_lwp, except);
2994}
2995
2996static void move_out_of_jump_pad_callback (thread_info *thread);
2997static int stuck_in_jump_pad_callback (thread_info *thread, void *data);
2998static int lwp_running (thread_info *thread, void *data);
2999static ptid_t linux_wait_1 (ptid_t ptid,
3000 struct target_waitstatus *ourstatus,
3001 int target_options);
3002
3003/* Stabilize threads (move out of jump pads).
3004
3005 If a thread is midway collecting a fast tracepoint, we need to
3006 finish the collection and move it out of the jump pad before
3007 reporting the signal.
3008
3009 This avoids recursion while collecting (when a signal arrives
3010 midway, and the signal handler itself collects), which would trash
3011 the trace buffer. In case the user set a breakpoint in a signal
3012 handler, this avoids the backtrace showing the jump pad, etc.
3013 Most importantly, there are certain things we can't do safely if
3014 threads are stopped in a jump pad (or in its callees). For
3015 example:
3016
3017 - starting a new trace run. A thread still collecting the
3018 previous run could trash the trace buffer when resumed. The trace
3019 buffer control structures would have been reset but the thread had
3020 no way to tell. The thread could even be midway through memcpy'ing
3021 to the buffer, which would mean that when resumed, it would clobber
3022 the trace buffer that had been set for a new run.
3023
3024 - we can't rewrite/reuse the jump pads for new tracepoints
3025 safely. Say you do tstart while a thread is stopped midway while
3026 collecting. When the thread is later resumed, it finishes the
3027 collection, and returns to the jump pad, to execute the original
3028 instruction that was under the tracepoint jump at the time the
3029 older run had been started. If the jump pad had been rewritten
3030 since for something else in the new run, the thread would now
3031 execute the wrong / random instructions. */
3032
3033static void
3034linux_stabilize_threads (void)
3035{
3036 struct thread_info *saved_thread;
3037 struct thread_info *thread_stuck;
3038
3039 thread_stuck
3040 = (struct thread_info *) find_inferior (&all_threads,
3041 stuck_in_jump_pad_callback,
3042 NULL);
3043 if (thread_stuck != NULL)
3044 {
3045 if (debug_threads)
3046 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3047 lwpid_of (thread_stuck));
3048 return;
3049 }
3050
3051 saved_thread = current_thread;
3052
3053 stabilizing_threads = 1;
3054
3055 /* Kick 'em all. */
3056 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3057
3058 /* Loop until all are stopped out of the jump pads. */
3059 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3060 {
3061 struct target_waitstatus ourstatus;
3062 struct lwp_info *lwp;
3063 int wstat;
3064
3065 /* Note that we go through the full wait event loop. While
3066 moving threads out of jump pad, we need to be able to step
3067 over internal breakpoints and such. */
3068 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3069
3070 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3071 {
3072 lwp = get_thread_lwp (current_thread);
3073
3074 /* Lock it. */
3075 lwp_suspended_inc (lwp);
3076
3077 if (ourstatus.value.sig != GDB_SIGNAL_0
3078 || current_thread->last_resume_kind == resume_stop)
3079 {
3080 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3081 enqueue_one_deferred_signal (lwp, &wstat);
3082 }
3083 }
3084 }
3085
3086 unsuspend_all_lwps (NULL);
3087
3088 stabilizing_threads = 0;
3089
3090 current_thread = saved_thread;
3091
3092 if (debug_threads)
3093 {
3094 thread_stuck
3095 = (struct thread_info *) find_inferior (&all_threads,
3096 stuck_in_jump_pad_callback,
3097 NULL);
3098 if (thread_stuck != NULL)
3099 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3100 lwpid_of (thread_stuck));
3101 }
3102}
3103
3104/* Convenience function that is called when the kernel reports an
3105 event that is not passed out to GDB. */
3106
3107static ptid_t
3108ignore_event (struct target_waitstatus *ourstatus)
3109{
3110 /* If we got an event, there may still be others, as a single
3111 SIGCHLD can indicate more than one child stopped. This forces
3112 another target_wait call. */
3113 async_file_mark ();
3114
3115 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3116 return null_ptid;
3117}
3118
3119/* Convenience function that is called when the kernel reports an exit
3120 event. This decides whether to report the event to GDB as a
3121 process exit event, a thread exit event, or to suppress the
3122 event. */
3123
3124static ptid_t
3125filter_exit_event (struct lwp_info *event_child,
3126 struct target_waitstatus *ourstatus)
3127{
3128 struct thread_info *thread = get_lwp_thread (event_child);
3129 ptid_t ptid = ptid_of (thread);
3130
3131 if (!last_thread_of_process_p (pid_of (thread)))
3132 {
3133 if (report_thread_events)
3134 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3135 else
3136 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3137
3138 delete_lwp (event_child);
3139 }
3140 return ptid;
3141}
3142
3143/* Returns 1 if GDB is interested in any event_child syscalls. */
3144
3145static int
3146gdb_catching_syscalls_p (struct lwp_info *event_child)
3147{
3148 struct thread_info *thread = get_lwp_thread (event_child);
3149 struct process_info *proc = get_thread_process (thread);
3150
3151 return !proc->syscalls_to_catch.empty ();
3152}
3153
3154/* Returns 1 if GDB is interested in the event_child syscall.
3155 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3156
3157static int
3158gdb_catch_this_syscall_p (struct lwp_info *event_child)
3159{
3160 int sysno;
3161 struct thread_info *thread = get_lwp_thread (event_child);
3162 struct process_info *proc = get_thread_process (thread);
3163
3164 if (proc->syscalls_to_catch.empty ())
3165 return 0;
3166
3167 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3168 return 1;
3169
3170 get_syscall_trapinfo (event_child, &sysno);
3171
3172 for (int iter : proc->syscalls_to_catch)
3173 if (iter == sysno)
3174 return 1;
3175
3176 return 0;
3177}
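
/* What a "catch syscall" request boils down to on this side (sketch,
   not from the original source; the RSP unpacking that fills the
   vector lives elsewhere, and the syscall number below is
   hypothetical): */
#if 0
proc->syscalls_to_catch.push_back (ANY_SYSCALL);  /* catch every syscall */
/* or, for specific syscalls, their target syscall numbers: */
proc->syscalls_to_catch.push_back (2);		  /* hypothetical sysno */
#endif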
3178
3179/* Wait for an event from the inferior; store the status in OURSTATUS and return the ptid of the event thread. */
3180
3181static ptid_t
3182linux_wait_1 (ptid_t ptid,
3183 struct target_waitstatus *ourstatus, int target_options)
3184{
3185 int w;
3186 struct lwp_info *event_child;
3187 int options;
3188 int pid;
3189 int step_over_finished;
3190 int bp_explains_trap;
3191 int maybe_internal_trap;
3192 int report_to_gdb;
3193 int trace_event;
3194 int in_step_range;
3195 int any_resumed;
3196
3197 if (debug_threads)
3198 {
3199 debug_enter ();
3200 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3201 }
3202
3203 /* Translate generic target options into linux options. */
3204 options = __WALL;
3205 if (target_options & TARGET_WNOHANG)
3206 options |= WNOHANG;
3207
3208 bp_explains_trap = 0;
3209 trace_event = 0;
3210 in_step_range = 0;
3211 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3212
3213 /* Find a resumed LWP, if any. */
3214 if (find_inferior (&all_threads,
3215 status_pending_p_callback,
3216 &minus_one_ptid) != NULL)
3217 any_resumed = 1;
3218 else if ((find_inferior (&all_threads,
3219 not_stopped_callback,
3220 &minus_one_ptid) != NULL))
3221 any_resumed = 1;
3222 else
3223 any_resumed = 0;
3224
3225 if (ptid_equal (step_over_bkpt, null_ptid))
3226 pid = linux_wait_for_event (ptid, &w, options);
3227 else
3228 {
3229 if (debug_threads)
3230 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3231 target_pid_to_str (step_over_bkpt));
3232 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3233 }
3234
3235 if (pid == 0 || (pid == -1 && !any_resumed))
3236 {
3237 gdb_assert (target_options & TARGET_WNOHANG);
3238
3239 if (debug_threads)
3240 {
3241 debug_printf ("linux_wait_1 ret = null_ptid, "
3242 "TARGET_WAITKIND_IGNORE\n");
3243 debug_exit ();
3244 }
3245
3246 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3247 return null_ptid;
3248 }
3249 else if (pid == -1)
3250 {
3251 if (debug_threads)
3252 {
3253 debug_printf ("linux_wait_1 ret = null_ptid, "
3254 "TARGET_WAITKIND_NO_RESUMED\n");
3255 debug_exit ();
3256 }
3257
3258 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3259 return null_ptid;
3260 }
3261
3262 event_child = get_thread_lwp (current_thread);
3263
3264 /* linux_wait_for_event only returns an exit status for the last
3265 child of a process. Report it. */
3266 if (WIFEXITED (w) || WIFSIGNALED (w))
3267 {
3268 if (WIFEXITED (w))
3269 {
3270 ourstatus->kind = TARGET_WAITKIND_EXITED;
3271 ourstatus->value.integer = WEXITSTATUS (w);
3272
3273 if (debug_threads)
3274 {
3275 debug_printf ("linux_wait_1 ret = %s, exited with "
3276 "retcode %d\n",
3277 target_pid_to_str (ptid_of (current_thread)),
3278 WEXITSTATUS (w));
3279 debug_exit ();
3280 }
3281 }
3282 else
3283 {
3284 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3285 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3286
3287 if (debug_threads)
3288 {
3289 debug_printf ("linux_wait_1 ret = %s, terminated with "
3290 "signal %d\n",
3291 target_pid_to_str (ptid_of (current_thread)),
3292 WTERMSIG (w));
3293 debug_exit ();
3294 }
3295 }
3296
3297 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3298 return filter_exit_event (event_child, ourstatus);
3299
3300 return ptid_of (current_thread);
3301 }
3302
3303 /* If a step-over executes a breakpoint instruction, then in the
3304 case of a hardware single step it means a gdb/gdbserver breakpoint
3305 had been planted on top of a permanent breakpoint, while in the
3306 case of a software single step it may just mean that gdbserver hit
3307 the reinsert breakpoint. The PC has been adjusted by
3308 save_stop_reason to point at the breakpoint address.
3309 So, for a hardware single step, advance the PC manually past the
3310 breakpoint; for a software single step, advance it only if it's not
3311 the single_step_breakpoint we are hitting.
3312 This prevents the program from trapping the permanent breakpoint
3313 forever. */
3314 if (!ptid_equal (step_over_bkpt, null_ptid)
3315 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3316 && (event_child->stepping
3317 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3318 {
3319 int increment_pc = 0;
3320 int breakpoint_kind = 0;
3321 CORE_ADDR stop_pc = event_child->stop_pc;
3322
3323 breakpoint_kind =
3324 the_target->breakpoint_kind_from_current_state (&stop_pc);
3325 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3326
3327 if (debug_threads)
3328 {
3329 debug_printf ("step-over for %s executed software breakpoint\n",
3330 target_pid_to_str (ptid_of (current_thread)));
3331 }
3332
3333 if (increment_pc != 0)
3334 {
3335 struct regcache *regcache
3336 = get_thread_regcache (current_thread, 1);
3337
3338 event_child->stop_pc += increment_pc;
3339 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3340
3341 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3342 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3343 }
3344 }
3345
3346 /* If this event was not handled before, and is not a SIGTRAP, we
3347 report it. SIGILL and SIGSEGV are also treated as traps in case
3348 a breakpoint is inserted at the current PC. If this target does
3349 not support internal breakpoints at all, we also report the
3350 SIGTRAP without further processing; it's of no concern to us. */
3351 maybe_internal_trap
3352 = (supports_breakpoints ()
3353 && (WSTOPSIG (w) == SIGTRAP
3354 || ((WSTOPSIG (w) == SIGILL
3355 || WSTOPSIG (w) == SIGSEGV)
3356 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3357
3358 if (maybe_internal_trap)
3359 {
3360 /* Handle anything that requires bookkeeping before deciding to
3361 report the event or continue waiting. */
3362
3363 /* First check if we can explain the SIGTRAP with an internal
3364 breakpoint, or if we should possibly report the event to GDB.
3365 Do this before anything that may remove or insert a
3366 breakpoint. */
3367 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3368
3369 /* We have a SIGTRAP, possibly a step-over dance has just
3370 finished. If so, tweak the state machine accordingly,
3371 reinsert breakpoints and delete any single-step
3372 breakpoints. */
3373 step_over_finished = finish_step_over (event_child);
3374
3375 /* Now invoke the callbacks of any internal breakpoints there. */
3376 check_breakpoints (event_child->stop_pc);
3377
3378 /* Handle tracepoint data collecting. This may overflow the
3379 trace buffer, and cause a tracing stop, removing
3380 breakpoints. */
3381 trace_event = handle_tracepoints (event_child);
3382
3383 if (bp_explains_trap)
3384 {
3385 if (debug_threads)
3386 debug_printf ("Hit a gdbserver breakpoint.\n");
3387 }
3388 }
3389 else
3390 {
3391 /* We have some other signal, possibly a step-over dance was in
3392 progress, and it should be cancelled too. */
3393 step_over_finished = finish_step_over (event_child);
3394 }
3395
3396 /* We have all the data we need. Either report the event to GDB, or
3397 resume threads and keep waiting for more. */
3398
3399 /* If we're collecting a fast tracepoint, finish the collection and
3400 move out of the jump pad before delivering a signal. See
3401 linux_stabilize_threads. */
3402
3403 if (WIFSTOPPED (w)
3404 && WSTOPSIG (w) != SIGTRAP
3405 && supports_fast_tracepoints ()
3406 && agent_loaded_p ())
3407 {
3408 if (debug_threads)
3409 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3410 "to defer or adjust it.\n",
3411 WSTOPSIG (w), lwpid_of (current_thread));
3412
3413 /* Allow debugging the jump pad itself. */
3414 if (current_thread->last_resume_kind != resume_step
3415 && maybe_move_out_of_jump_pad (event_child, &w))
3416 {
3417 enqueue_one_deferred_signal (event_child, &w);
3418
3419 if (debug_threads)
3420 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3421 WSTOPSIG (w), lwpid_of (current_thread));
3422
3423 linux_resume_one_lwp (event_child, 0, 0, NULL);
3424
3425 if (debug_threads)
3426 debug_exit ();
3427 return ignore_event (ourstatus);
3428 }
3429 }
3430
3431 if (event_child->collecting_fast_tracepoint
3432 != fast_tpoint_collect_result::not_collecting)
3433 {
3434 if (debug_threads)
3435 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3436 "Check if we're already there.\n",
3437 lwpid_of (current_thread),
3438 (int) event_child->collecting_fast_tracepoint);
3439
3440 trace_event = 1;
3441
3442 event_child->collecting_fast_tracepoint
3443 = linux_fast_tracepoint_collecting (event_child, NULL);
3444
3445 if (event_child->collecting_fast_tracepoint
3446 != fast_tpoint_collect_result::before_insn)
3447 {
3448 /* No longer need this breakpoint. */
3449 if (event_child->exit_jump_pad_bkpt != NULL)
3450 {
3451 if (debug_threads)
3452 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3453 "stopping all threads momentarily.\n");
3454
3455 /* Other running threads could hit this breakpoint.
3456 We don't handle moribund locations like GDB does,
3457 instead we always pause all threads when removing
3458 breakpoints, so that any step-over or
3459 decr_pc_after_break adjustment is always taken
3460 care of while the breakpoint is still
3461 inserted. */
3462 stop_all_lwps (1, event_child);
3463
3464 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3465 event_child->exit_jump_pad_bkpt = NULL;
3466
3467 unstop_all_lwps (1, event_child);
3468
3469 gdb_assert (event_child->suspended >= 0);
3470 }
3471 }
3472
3473 if (event_child->collecting_fast_tracepoint
3474 == fast_tpoint_collect_result::not_collecting)
3475 {
3476 if (debug_threads)
3477 debug_printf ("fast tracepoint finished "
3478 "collecting successfully.\n");
3479
3480 /* We may have a deferred signal to report. */
3481 if (dequeue_one_deferred_signal (event_child, &w))
3482 {
3483 if (debug_threads)
3484 debug_printf ("dequeued one signal.\n");
3485 }
3486 else
3487 {
3488 if (debug_threads)
3489 debug_printf ("no deferred signals.\n");
3490
3491 if (stabilizing_threads)
3492 {
3493 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3494 ourstatus->value.sig = GDB_SIGNAL_0;
3495
3496 if (debug_threads)
3497 {
3498 debug_printf ("linux_wait_1 ret = %s, stopped "
3499 "while stabilizing threads\n",
3500 target_pid_to_str (ptid_of (current_thread)));
3501 debug_exit ();
3502 }
3503
3504 return ptid_of (current_thread);
3505 }
3506 }
3507 }
3508 }
3509
3510 /* Check whether GDB would be interested in this event. */
3511
3512 /* Check if GDB is interested in this syscall. */
3513 if (WIFSTOPPED (w)
3514 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3515 && !gdb_catch_this_syscall_p (event_child))
3516 {
3517 if (debug_threads)
3518 {
3519 debug_printf ("Ignored syscall for LWP %ld.\n",
3520 lwpid_of (current_thread));
3521 }
3522
3523 linux_resume_one_lwp (event_child, event_child->stepping,
3524 0, NULL);
3525
3526 if (debug_threads)
3527 debug_exit ();
3528 return ignore_event (ourstatus);
3529 }
3530
3531 /* If GDB is not interested in this signal, don't stop other
3532 threads, and don't report it to GDB. Just resume the inferior
3533 right away. We do this for threading-related signals as well as
3534 any that GDB specifically requested we ignore. But never ignore
3535 SIGSTOP if we sent it ourselves, and do not ignore signals when
3536 stepping - they may require special handling to skip the signal
3537 handler. Also never ignore signals that could be caused by a
3538 breakpoint. */
3539 if (WIFSTOPPED (w)
3540 && current_thread->last_resume_kind != resume_step
3541 && (
3542#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3543 (current_process ()->priv->thread_db != NULL
3544 && (WSTOPSIG (w) == __SIGRTMIN
3545 || WSTOPSIG (w) == __SIGRTMIN + 1))
3546 ||
3547#endif
3548 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3549 && !(WSTOPSIG (w) == SIGSTOP
3550 && current_thread->last_resume_kind == resume_stop)
3551 && !linux_wstatus_maybe_breakpoint (w))))
3552 {
3553 siginfo_t info, *info_p;
3554
3555 if (debug_threads)
3556 debug_printf ("Ignored signal %d for LWP %ld.\n",
3557 WSTOPSIG (w), lwpid_of (current_thread));
3558
3559 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3560 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3561 info_p = &info;
3562 else
3563 info_p = NULL;
3564
3565 if (step_over_finished)
3566 {
3567 /* We cancelled this thread's step-over above. We still
3568 need to unsuspend all other LWPs, and set them back
3569 running again while the signal handler runs. */
3570 unsuspend_all_lwps (event_child);
3571
3572 /* Enqueue the pending signal info so that proceed_all_lwps
3573 doesn't lose it. */
3574 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3575
3576 proceed_all_lwps ();
3577 }
3578 else
3579 {
3580 linux_resume_one_lwp (event_child, event_child->stepping,
3581 WSTOPSIG (w), info_p);
3582 }
3583
3584 if (debug_threads)
3585 debug_exit ();
3586
3587 return ignore_event (ourstatus);
3588 }
3589
3590 /* Note that all addresses are always "out of the step range" when
3591 there's no range to begin with. */
3592 in_step_range = lwp_in_step_range (event_child);
3593
3594 /* If GDB wanted this thread to single step, and the thread is out
3595 of the step range, we always want to report the SIGTRAP, and let
3596 GDB handle it. Watchpoints should always be reported. So should
3597 signals we can't explain. A SIGTRAP we can't explain could be a
3598 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3599 we do, we'll be able to handle GDB breakpoints on top of internal
3600 breakpoints, by handling the internal breakpoint and still
3601 reporting the event to GDB. If we don't, we're out of luck; GDB
3602 won't see the breakpoint hit. If we see a single-step event but
3603 the thread should be continuing, don't pass the trap to gdb.
3604 That indicates that we had previously finished a single-step but
3605 left the single-step pending -- see
3606 complete_ongoing_step_over. */
3607 report_to_gdb = (!maybe_internal_trap
3608 || (current_thread->last_resume_kind == resume_step
3609 && !in_step_range)
3610 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3611 || (!in_step_range
3612 && !bp_explains_trap
3613 && !trace_event
3614 && !step_over_finished
3615 && !(current_thread->last_resume_kind == resume_continue
3616 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3617 || (gdb_breakpoint_here (event_child->stop_pc)
3618 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3619 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3620 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3621
3622 run_breakpoint_commands (event_child->stop_pc);
3623
3624 /* We found no reason GDB would want us to stop. We either hit one
3625 of our own breakpoints, or finished an internal step GDB
3626 shouldn't know about. */
3627 if (!report_to_gdb)
3628 {
3629 if (debug_threads)
3630 {
3631 if (bp_explains_trap)
3632 debug_printf ("Hit a gdbserver breakpoint.\n");
3633 if (step_over_finished)
3634 debug_printf ("Step-over finished.\n");
3635 if (trace_event)
3636 debug_printf ("Tracepoint event.\n");
3637 if (lwp_in_step_range (event_child))
3638 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3639 paddress (event_child->stop_pc),
3640 paddress (event_child->step_range_start),
3641 paddress (event_child->step_range_end));
3642 }
3643
3644 /* We're not reporting this breakpoint to GDB, so apply the
3645 decr_pc_after_break adjustment to the inferior's regcache
3646 ourselves. */
3647
3648 if (the_low_target.set_pc != NULL)
3649 {
3650 struct regcache *regcache
3651 = get_thread_regcache (current_thread, 1);
3652 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3653 }
3654
3655 if (step_over_finished)
3656 {
3657 /* If we have finished stepping over a breakpoint, we've
3658 stopped and suspended all LWPs momentarily except the
3659 stepping one. This is where we resume them all again.
3660 We're going to keep waiting, so use proceed, which
3661 handles stepping over the next breakpoint. */
3662 unsuspend_all_lwps (event_child);
3663 }
3664 else
3665 {
3666 /* Remove the single-step breakpoints, if any. Note that
3667 there is no single-step breakpoint if we finished stepping
3668 over. */
3669 if (can_software_single_step ()
3670 && has_single_step_breakpoints (current_thread))
3671 {
3672 stop_all_lwps (0, event_child);
3673 delete_single_step_breakpoints (current_thread);
3674 unstop_all_lwps (0, event_child);
3675 }
3676 }
3677
3678 if (debug_threads)
3679 debug_printf ("proceeding all threads.\n");
3680 proceed_all_lwps ();
3681
3682 if (debug_threads)
3683 debug_exit ();
3684
3685 return ignore_event (ourstatus);
3686 }
3687
3688 if (debug_threads)
3689 {
3690 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3691 {
3692 std::string str
3693 = target_waitstatus_to_string (&event_child->waitstatus);
3694
3695 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3696 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3697 }
3698 if (current_thread->last_resume_kind == resume_step)
3699 {
3700 if (event_child->step_range_start == event_child->step_range_end)
3701 debug_printf ("GDB wanted to single-step, reporting event.\n");
3702 else if (!lwp_in_step_range (event_child))
3703 debug_printf ("Out of step range, reporting event.\n");
3704 }
3705 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3706 debug_printf ("Stopped by watchpoint.\n");
3707 else if (gdb_breakpoint_here (event_child->stop_pc))
3708 debug_printf ("Stopped by GDB breakpoint.\n");
3709 debug_printf ("Hit a non-gdbserver trap event.\n");
3711 }
3712
3713 /* Alright, we're going to report a stop. */
3714
3715 /* Remove single-step breakpoints. */
3716 if (can_software_single_step ())
3717 {
3718 /* Decide whether to remove the single-step breakpoints. If we
3719 do remove them, stop all lwps, so that other threads won't hit
3720 a breakpoint left behind in stale memory. */
3721 int remove_single_step_breakpoints_p = 0;
3722
3723 if (non_stop)
3724 {
3725 remove_single_step_breakpoints_p
3726 = has_single_step_breakpoints (current_thread);
3727 }
3728 else
3729 {
3730 /* In all-stop, a stop reply cancels all previous resume
3731 requests. Delete all single-step breakpoints. */
3732
3733 find_thread ([&] (thread_info *thread) {
3734 if (has_single_step_breakpoints (thread))
3735 {
3736 remove_single_step_breakpoints_p = 1;
3737 return true;
3738 }
3739
3740 return false;
3741 });
3742 }
3743
3744 if (remove_single_step_breakpoints_p)
3745 {
3746 /* If we remove single-step breakpoints from memory, stop all lwps,
3747 so that other threads won't hit a breakpoint left behind in
3748 stale memory. */
3749 stop_all_lwps (0, event_child);
3750
3751 if (non_stop)
3752 {
3753 gdb_assert (has_single_step_breakpoints (current_thread));
3754 delete_single_step_breakpoints (current_thread);
3755 }
3756 else
3757 {
3758 for_each_thread ([] (thread_info *thread){
3759 if (has_single_step_breakpoints (thread))
3760 delete_single_step_breakpoints (thread);
3761 });
3762 }
3763
3764 unstop_all_lwps (0, event_child);
3765 }
3766 }
3767
3768 if (!stabilizing_threads)
3769 {
3770 /* In all-stop, stop all threads. */
3771 if (!non_stop)
3772 stop_all_lwps (0, NULL);
3773
3774 if (step_over_finished)
3775 {
3776 if (!non_stop)
3777 {
3778 /* If we were doing a step-over, all other threads but
3779 the stepping one had been paused in start_step_over,
3780 with their suspend counts incremented. We don't want
3781 to do a full unstop/unpause, because we're in
3782 all-stop mode (so we want threads stopped), but we
3783 still need to unsuspend the other threads, to
3784 decrement their `suspended' count back. */
3785 unsuspend_all_lwps (event_child);
3786 }
3787 else
3788 {
3789 /* If we just finished a step-over, then all threads had
3790 been momentarily paused. In all-stop, that's fine,
3791 we want threads stopped by now anyway. In non-stop,
3792 we need to re-resume threads that GDB wanted to be
3793 running. */
3794 unstop_all_lwps (1, event_child);
3795 }
3796 }
3797
3798 /* If we're not waiting for a specific LWP, choose an event LWP
3799 from among those that have had events. Giving equal priority
3800 to all LWPs that have had events helps prevent
3801 starvation. */
3802 if (ptid_equal (ptid, minus_one_ptid))
3803 {
3804 event_child->status_pending_p = 1;
3805 event_child->status_pending = w;
3806
3807 select_event_lwp (&event_child);
3808
3809 /* current_thread and event_child must stay in sync. */
3810 current_thread = get_lwp_thread (event_child);
3811
3812 event_child->status_pending_p = 0;
3813 w = event_child->status_pending;
3814 }
3815
3817 /* Stabilize threads (move out of jump pads). */
3818 if (!non_stop)
3819 stabilize_threads ();
3820 }
3821 else
3822 {
3823 /* If we just finished a step-over, then all threads had been
3824 momentarily paused. In all-stop, that's fine, we want
3825 threads stopped by now anyway. In non-stop, we need to
3826 re-resume threads that GDB wanted to be running. */
3827 if (step_over_finished)
3828 unstop_all_lwps (1, event_child);
3829 }
3830
3831 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3832 {
3833 /* If the reported event is an exit, fork, vfork or exec, let
3834 GDB know. */
3835
3836 /* Break the unreported fork relationship chain. */
3837 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3838 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3839 {
3840 event_child->fork_relative->fork_relative = NULL;
3841 event_child->fork_relative = NULL;
3842 }
3843
3844 *ourstatus = event_child->waitstatus;
3845 /* Clear the event lwp's waitstatus since we handled it already. */
3846 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3847 }
3848 else
3849 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3850
3851 /* Now that we've selected our final event LWP, un-adjust its PC if
3852 it was a software breakpoint, and the client doesn't know we can
3853 adjust the breakpoint ourselves. */
3854 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3855 && !swbreak_feature)
3856 {
3857 int decr_pc = the_low_target.decr_pc_after_break;
3858
3859 if (decr_pc != 0)
3860 {
3861 struct regcache *regcache
3862 = get_thread_regcache (current_thread, 1);
3863 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3864 }
3865 }
3866
3867 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3868 {
3869 get_syscall_trapinfo (event_child,
3870 &ourstatus->value.syscall_number);
3871 ourstatus->kind = event_child->syscall_state;
3872 }
3873 else if (current_thread->last_resume_kind == resume_stop
3874 && WSTOPSIG (w) == SIGSTOP)
3875 {
3876 /* A thread that was requested to stop by GDB with vCont;t
3877 stopped cleanly, so report it as SIG0. The use of SIGSTOP
3878 is an implementation detail. */
3879 ourstatus->value.sig = GDB_SIGNAL_0;
3880 }
3881 else if (current_thread->last_resume_kind == resume_stop
3882 && WSTOPSIG (w) != SIGSTOP)
3883 {
3884 /* A thread that was requested to stop by GDB with vCont;t,
3885 but stopped for some other reason. */
3886 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3887 }
3888 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3889 {
3890 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3891 }
3892
3893 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3894
3895 if (debug_threads)
3896 {
3897 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3898 target_pid_to_str (ptid_of (current_thread)),
3899 ourstatus->kind, ourstatus->value.sig);
3900 debug_exit ();
3901 }
3902
3903 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3904 return filter_exit_event (event_child, ourstatus);
3905
3906 return ptid_of (current_thread);
3907}
3908
3909/* Get rid of any pending event in the pipe. */
3910static void
3911async_file_flush (void)
3912{
3913 int ret;
3914 char buf;
3915
3916 do
3917 ret = read (linux_event_pipe[0], &buf, 1);
3918 while (ret >= 0 || (ret == -1 && errno == EINTR));
3919}
3920
3921/* Put something in the pipe, so the event loop wakes up. */
3922static void
3923async_file_mark (void)
3924{
3925 int ret;
3926
3927 async_file_flush ();
3928
3929 do
3930 ret = write (linux_event_pipe[1], "+", 1);
3931 while (ret == 0 || (ret == -1 && errno == EINTR));
3932
3933 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3934 be awakened anyway. */
3935}
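/* For illustration: the two helpers above are the classic self-pipe
 pattern. A minimal sketch of the idea, assuming a nonblocking pipe
 FDS registered with the event loop (the names here are hypothetical,
 not the gdbserver API):

 int fds[2];
 pipe (fds);
 fcntl (fds[0], F_SETFL, O_NONBLOCK);
 fcntl (fds[1], F_SETFL, O_NONBLOCK);
 write (fds[1], "+", 1); // mark: wake up the event loop
 char buf;
 while (read (fds[0], &buf, 1) > 0)
 ; // flush: drain all wakeup bytes

 Because async_file_mark flushes before writing, at most one byte is
 ever pending in the pipe. */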
3936
3937static ptid_t
3938linux_wait (ptid_t ptid,
3939 struct target_waitstatus *ourstatus, int target_options)
3940{
3941 ptid_t event_ptid;
3942
3943 /* Flush the async file first. */
3944 if (target_is_async_p ())
3945 async_file_flush ();
3946
3947 do
3948 {
3949 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3950 }
3951 while ((target_options & TARGET_WNOHANG) == 0
3952 && ptid_equal (event_ptid, null_ptid)
3953 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3954
3955 /* If at least one stop was reported, there may be more. A single
3956 SIGCHLD can signal more than one child stop. */
3957 if (target_is_async_p ()
3958 && (target_options & TARGET_WNOHANG) != 0
3959 && !ptid_equal (event_ptid, null_ptid))
3960 async_file_mark ();
3961
3962 return event_ptid;
3963}
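/* For illustration, a non-blocking caller of the above (a sketch,
 not actual gdbserver code):

 struct target_waitstatus ws;
 ptid_t ret = linux_wait (minus_one_ptid, &ws, TARGET_WNOHANG);
 if (ptid_equal (ret, null_ptid) && ws.kind == TARGET_WAITKIND_IGNORE)
 ; // nothing ready yet; the event loop will call back later

 Without TARGET_WNOHANG, the loop in linux_wait keeps calling
 linux_wait_1 until a real event arrives. */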
3964
3965/* Send a signal to an LWP. */
3966
3967static int
3968kill_lwp (unsigned long lwpid, int signo)
3969{
3970 int ret;
3971
3972 errno = 0;
3973 ret = syscall (__NR_tkill, lwpid, signo);
3974 if (errno == ENOSYS)
3975 {
3976 /* If tkill fails, then we are not using nptl threads, a
3977 configuration we no longer support. */
3978 perror_with_name (("tkill"));
3979 }
3980 return ret;
3981}
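/* For illustration of why tkill is used above: kill () addresses a
 whole process (thread group), while tkill addresses a single LWP:

 kill (getpid (), SIGSTOP); // any thread may dequeue this
 syscall (__NR_tkill, lwpid, SIGSTOP); // exactly this LWP stops

 tgkill additionally guards against LWP id reuse, at the cost of also
 passing the thread group id. */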
3982
3983void
3984linux_stop_lwp (struct lwp_info *lwp)
3985{
3986 send_sigstop (lwp);
3987}
3988
3989static void
3990send_sigstop (struct lwp_info *lwp)
3991{
3992 int pid;
3993
3994 pid = lwpid_of (get_lwp_thread (lwp));
3995
3996 /* If we already have a pending stop signal for this process, don't
3997 send another. */
3998 if (lwp->stop_expected)
3999 {
4000 if (debug_threads)
4001 debug_printf ("Have pending sigstop for lwp %d\n", pid);
4002
4003 return;
4004 }
4005
4006 if (debug_threads)
4007 debug_printf ("Sending sigstop to lwp %d\n", pid);
4008
4009 lwp->stop_expected = 1;
4010 kill_lwp (pid, SIGSTOP);
4011}
4012
4013static int
4014send_sigstop_callback (thread_info *thread, void *except)
4015{
4016 struct lwp_info *lwp = get_thread_lwp (thread);
4017
4018 /* Ignore EXCEPT. */
4019 if (lwp == except)
4020 return 0;
4021
4022 if (lwp->stopped)
4023 return 0;
4024
4025 send_sigstop (lwp);
4026 return 0;
4027}
4028
4029/* Increment the suspend count of an LWP, and stop it, if not stopped
4030 yet. */
4031static int
4032suspend_and_send_sigstop_callback (thread_info *thread, void *except)
4033{
4034 struct lwp_info *lwp = get_thread_lwp (thread);
4035
4036 /* Ignore EXCEPT. */
4037 if (lwp == except)
4038 return 0;
4039
4040 lwp_suspended_inc (lwp);
4041
4042 return send_sigstop_callback (thread, except);
4043}
4044
4045static void
4046mark_lwp_dead (struct lwp_info *lwp, int wstat)
4047{
4048 /* Store the exit status for later. */
4049 lwp->status_pending_p = 1;
4050 lwp->status_pending = wstat;
4051
4052 /* Store in waitstatus as well, as there's nothing else to process
4053 for this event. */
4054 if (WIFEXITED (wstat))
4055 {
4056 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4057 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4058 }
4059 else if (WIFSIGNALED (wstat))
4060 {
4061 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4062 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4063 }
4064
4065 /* Prevent trying to stop it. */
4066 lwp->stopped = 1;
4067
4068 /* No further stops are expected from a dead lwp. */
4069 lwp->stop_expected = 0;
4070}
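/* For illustration, how a raw wait status WSTAT maps onto the
 waitstatus kinds used above (standard <sys/wait.h> macros):

 if (WIFEXITED (wstat))
 printf ("exited, code %d\n", WEXITSTATUS (wstat));
 else if (WIFSIGNALED (wstat))
 printf ("killed by signal %d\n", WTERMSIG (wstat));
 else if (WIFSTOPPED (wstat))
 printf ("stopped by signal %d\n", WSTOPSIG (wstat));

 Only the first two are "dead" states; lwp_is_marked_dead below checks
 for exactly those. */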
4071
4072/* Return true if LWP has exited already, and has a pending exit event
4073 to report to GDB. */
4074
4075static int
4076lwp_is_marked_dead (struct lwp_info *lwp)
4077{
4078 return (lwp->status_pending_p
4079 && (WIFEXITED (lwp->status_pending)
4080 || WIFSIGNALED (lwp->status_pending)));
4081}
4082
4083/* Wait for all children to stop for the SIGSTOPs we just queued. */
4084
4085static void
4086wait_for_sigstop (void)
4087{
4088 struct thread_info *saved_thread;
4089 ptid_t saved_tid;
4090 int wstat;
4091 int ret;
4092
4093 saved_thread = current_thread;
4094 if (saved_thread != NULL)
4095 saved_tid = saved_thread->id;
4096 else
4097 saved_tid = null_ptid; /* avoid bogus unused warning */
4098
4099 if (debug_threads)
4100 debug_printf ("wait_for_sigstop: pulling events\n");
4101
4102 /* Passing NULL_PTID as filter indicates we want all events to be
4103 left pending. Eventually this returns when there are no
4104 unwaited-for children left. */
4105 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4106 &wstat, __WALL);
4107 gdb_assert (ret == -1);
4108
4109 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4110 current_thread = saved_thread;
4111 else
4112 {
4113 if (debug_threads)
4114 debug_printf ("Previously current thread died.\n");
4115
4116 /* We can't change the current inferior behind GDB's back,
4117 otherwise, a subsequent command may apply to the wrong
4118 process. */
4119 current_thread = NULL;
4120 }
4121}
4122
4123/* Returns true if THREAD's LWP is stopped in a jump pad, and we can't
4124 move it out, because we need to report the stop event to GDB. For
4125 example, if the user puts a breakpoint in the jump pad, it's
4126 because she wants to debug it. */
4127
4128static int
4129stuck_in_jump_pad_callback (thread_info *thread, void *data)
4130{
4131 struct lwp_info *lwp = get_thread_lwp (thread);
4132
4133 if (lwp->suspended != 0)
4134 {
4135 internal_error (__FILE__, __LINE__,
4136 "LWP %ld is suspended, suspended=%d\n",
4137 lwpid_of (thread), lwp->suspended);
4138 }
4139 gdb_assert (lwp->stopped);
4140
4141 /* Allow debugging the jump pad, gdb_collect, etc. */
4142 return (supports_fast_tracepoints ()
4143 && agent_loaded_p ()
4144 && (gdb_breakpoint_here (lwp->stop_pc)
4145 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4146 || thread->last_resume_kind == resume_step)
4147 && (linux_fast_tracepoint_collecting (lwp, NULL)
4148 != fast_tpoint_collect_result::not_collecting));
4149}
4150
4151static void
4152move_out_of_jump_pad_callback (thread_info *thread)
4153{
4154 struct thread_info *saved_thread;
4155 struct lwp_info *lwp = get_thread_lwp (thread);
4156 int *wstat;
4157
4158 if (lwp->suspended != 0)
4159 {
4160 internal_error (__FILE__, __LINE__,
4161 "LWP %ld is suspended, suspended=%d\n",
4162 lwpid_of (thread), lwp->suspended);
4163 }
4164 gdb_assert (lwp->stopped);
4165
4166 /* For gdb_breakpoint_here. */
4167 saved_thread = current_thread;
4168 current_thread = thread;
4169
4170 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4171
4172 /* Allow debugging the jump pad, gdb_collect, etc. */
4173 if (!gdb_breakpoint_here (lwp->stop_pc)
4174 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4175 && thread->last_resume_kind != resume_step
4176 && maybe_move_out_of_jump_pad (lwp, wstat))
4177 {
4178 if (debug_threads)
4179 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4180 lwpid_of (thread));
4181
4182 if (wstat)
4183 {
4184 lwp->status_pending_p = 0;
4185 enqueue_one_deferred_signal (lwp, wstat);
4186
4187 if (debug_threads)
4188 debug_printf ("Signal %d for LWP %ld deferred "
4189 "(in jump pad)\n",
4190 WSTOPSIG (*wstat), lwpid_of (thread));
4191 }
4192
4193 linux_resume_one_lwp (lwp, 0, 0, NULL);
4194 }
4195 else
4196 lwp_suspended_inc (lwp);
4197
4198 current_thread = saved_thread;
4199}
4200
4201static int
4202lwp_running (thread_info *thread, void *data)
4203{
4204 struct lwp_info *lwp = get_thread_lwp (thread);
4205
4206 if (lwp_is_marked_dead (lwp))
4207 return 0;
4208 if (lwp->stopped)
4209 return 0;
4210 return 1;
4211}
4212
4213/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4214 If SUSPEND, then also increase the suspend count of every LWP,
4215 except EXCEPT. */
4216
4217static void
4218stop_all_lwps (int suspend, struct lwp_info *except)
4219{
4220 /* Should not be called recursively. */
4221 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4222
4223 if (debug_threads)
4224 {
4225 debug_enter ();
4226 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4227 suspend ? "stop-and-suspend" : "stop",
4228 except != NULL
4229 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4230 : "none");
4231 }
4232
4233 stopping_threads = (suspend
4234 ? STOPPING_AND_SUSPENDING_THREADS
4235 : STOPPING_THREADS);
4236
4237 if (suspend)
4238 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4239 else
4240 find_inferior (&all_threads, send_sigstop_callback, except);
4241 wait_for_sigstop ();
4242 stopping_threads = NOT_STOPPING_THREADS;
4243
4244 if (debug_threads)
4245 {
4246 debug_printf ("stop_all_lwps done, setting stopping_threads "
4247 "back to !stopping\n");
4248 debug_exit ();
4249 }
4250}
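/* For illustration, the SUSPEND flag must be matched by the caller's
 corresponding unstop call. A sketch of the pattern used by
 start_step_over and linux_wait_1:

 stop_all_lwps (1, event_child); // stop and suspend all but one
 // ... single-step event_child past its breakpoint ...
 unstop_all_lwps (1, event_child); // unsuspend and resume the rest

 Mismatched pairs leave `suspended' counts skewed, which trips the
 internal_error checks elsewhere in this file. */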
4251
4252/* Enqueue one signal in the chain of signals which need to be
4253 delivered to this process on next resume. */
4254
4255static void
4256enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4257{
4258 struct pending_signals *p_sig = XNEW (struct pending_signals);
4259
4260 p_sig->prev = lwp->pending_signals;
4261 p_sig->signal = signal;
4262 if (info == NULL)
4263 memset (&p_sig->info, 0, sizeof (siginfo_t));
4264 else
4265 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4266 lwp->pending_signals = p_sig;
4267}
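/* For illustration, pending_signals is a singly linked list chained
 through `prev': enqueueing pushes at the head, and the consumer in
 linux_resume_one_lwp_throw walks to the tail, so signals are
 delivered oldest-first:

 struct pending_signals **p = &lwp->pending_signals;
 while ((*p)->prev != NULL) // walk to the oldest entry
 p = &(*p)->prev;
 int sig = (*p)->signal; // deliver this one
 free (*p);
 *p = NULL; */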
4268
4269/* Install breakpoints for software single stepping. */
4270
4271static void
4272install_software_single_step_breakpoints (struct lwp_info *lwp)
4273{
4274 struct thread_info *thread = get_lwp_thread (lwp);
4275 struct regcache *regcache = get_thread_regcache (thread, 1);
4276 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4277
4278 current_thread = thread;
4279 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4280
4281 for (CORE_ADDR pc : next_pcs)
4282 set_single_step_breakpoint (pc, current_ptid);
4283
4284 do_cleanups (old_chain);
4285}
4286
4287/* Single step via hardware or software single step.
4288 Return 1 if hardware single stepping, 0 if software single stepping
4289 or can't single step. */
4290
4291static int
4292single_step (struct lwp_info* lwp)
4293{
4294 int step = 0;
4295
4296 if (can_hardware_single_step ())
4297 {
4298 step = 1;
4299 }
4300 else if (can_software_single_step ())
4301 {
4302 install_software_single_step_breakpoints (lwp);
4303 step = 0;
4304 }
4305 else
4306 {
4307 if (debug_threads)
4308 debug_printf ("stepping is not implemented on this target");
4309 }
4310
4311 return step;
4312}
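/* For illustration, the return value above feeds directly into the
 ptrace request selection in linux_resume_one_lwp_throw:

 int step = single_step (lwp);
 // step == 1 -> PTRACE_SINGLESTEP
 // step == 0 -> PTRACE_CONT (or PTRACE_SYSCALL when catching
 // syscalls), with any software single-step breakpoints already
 // installed by the call above. */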
4313
4314/* The signal can be delivered to the inferior if we are not trying to
4315 finish a fast tracepoint collect. Since a signal can be delivered
4316 during the step-over, the program may enter the signal handler and
4317 trap again after returning from it. We can live with the spurious
4318 double traps. */
4319
4320static int
4321lwp_signal_can_be_delivered (struct lwp_info *lwp)
4322{
4323 return (lwp->collecting_fast_tracepoint
4324 == fast_tpoint_collect_result::not_collecting);
4325}
4326
4327/* Resume execution of LWP. If STEP is nonzero, single-step it. If
4328 SIGNAL is nonzero, give it that signal. */
4329
4330static void
4331linux_resume_one_lwp_throw (struct lwp_info *lwp,
4332 int step, int signal, siginfo_t *info)
4333{
4334 struct thread_info *thread = get_lwp_thread (lwp);
4335 struct thread_info *saved_thread;
4336 int ptrace_request;
4337 struct process_info *proc = get_thread_process (thread);
4338
4339 /* Note that target description may not be initialised
4340 (proc->tdesc == NULL) at this point because the program hasn't
4341 stopped at the first instruction yet. It means GDBserver skips
4342 the extra traps from the wrapper program (see option --wrapper).
4343 Code in this function that requires register access should be
4344 guarded by proc->tdesc == NULL or something else. */
4345
4346 if (lwp->stopped == 0)
4347 return;
4348
4349 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4350
4351 fast_tpoint_collect_result fast_tp_collecting
4352 = lwp->collecting_fast_tracepoint;
4353
4354 gdb_assert (!stabilizing_threads
4355 || (fast_tp_collecting
4356 != fast_tpoint_collect_result::not_collecting));
4357
4358 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4359 user used the "jump" command, or "set $pc = foo"). */
4360 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4361 {
4362 /* Collecting 'while-stepping' actions doesn't make sense
4363 anymore. */
4364 release_while_stepping_state_list (thread);
4365 }
4366
4367 /* If we have pending signals or status, and a new signal, enqueue the
4368 signal. Also enqueue the signal if it can't be delivered to the
4369 inferior right now. */
4370 if (signal != 0
4371 && (lwp->status_pending_p
4372 || lwp->pending_signals != NULL
4373 || !lwp_signal_can_be_delivered (lwp)))
4374 {
4375 enqueue_pending_signal (lwp, signal, info);
4376
4377 /* Postpone any pending signal. It was enqueued above. */
4378 signal = 0;
4379 }
4380
4381 if (lwp->status_pending_p)
4382 {
4383 if (debug_threads)
4384 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4385 " has pending status\n",
4386 lwpid_of (thread), step ? "step" : "continue",
4387 lwp->stop_expected ? "expected" : "not expected");
4388 return;
4389 }
4390
4391 saved_thread = current_thread;
4392 current_thread = thread;
4393
4394 /* This bit needs some thinking about. If we get a signal that
4395 we must report while a single-step reinsert is still pending,
4396 we often end up resuming the thread. It might be better to
4397 (ew) allow a stack of pending events; then we could be sure that
4398 the reinsert happened right away and not lose any signals.
4399
4400 Making this stack would also shrink the window in which breakpoints are
4401 uninserted (see comment in linux_wait_for_lwp) but not enough for
4402 complete correctness, so it won't solve that problem. It may be
4403 worthwhile just to solve this one, however. */
4404 if (lwp->bp_reinsert != 0)
4405 {
4406 if (debug_threads)
4407 debug_printf (" pending reinsert at 0x%s\n",
4408 paddress (lwp->bp_reinsert));
4409
4410 if (can_hardware_single_step ())
4411 {
4412 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4413 {
4414 if (step == 0)
4415 warning ("BAD - reinserting but not stepping.");
4416 if (lwp->suspended)
4417 warning ("BAD - reinserting and suspended(%d).",
4418 lwp->suspended);
4419 }
4420 }
4421
4422 step = maybe_hw_step (thread);
4423 }
4424
4425 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4426 {
4427 if (debug_threads)
4428 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4429 " (exit-jump-pad-bkpt)\n",
4430 lwpid_of (thread));
4431 }
4432 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4433 {
4434 if (debug_threads)
4435 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4436 " single-stepping\n",
4437 lwpid_of (thread));
4438
4439 if (can_hardware_single_step ())
4440 step = 1;
4441 else
4442 {
4443 internal_error (__FILE__, __LINE__,
4444 "moving out of jump pad single-stepping"
4445 " not implemented on this target");
4446 }
4447 }
4448
4449 /* If we have while-stepping actions in this thread, set it stepping.
4450 If we have a signal to deliver, it may or may not be set to
4451 SIG_IGN; we don't know. Assume so, and allow collecting
4452 while-stepping into a signal handler. A possible smart thing to
4453 do would be to set an internal breakpoint at the signal return
4454 address, continue, and carry on catching this while-stepping
4455 action only when that breakpoint is hit. A future
4456 enhancement. */
4457 if (thread->while_stepping != NULL)
4458 {
4459 if (debug_threads)
4460 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4461 lwpid_of (thread));
4462
4463 step = single_step (lwp);
4464 }
4465
4466 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4467 {
4468 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4469
4470 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4471
4472 if (debug_threads)
4473 {
4474 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4475 (long) lwp->stop_pc);
4476 }
4477 }
4478
4479 /* If we have pending signals, consume one if it can be delivered to
4480 the inferior. */
4481 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4482 {
4483 struct pending_signals **p_sig;
4484
4485 p_sig = &lwp->pending_signals;
4486 while ((*p_sig)->prev != NULL)
4487 p_sig = &(*p_sig)->prev;
4488
4489 signal = (*p_sig)->signal;
4490 if ((*p_sig)->info.si_signo != 0)
4491 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4492 &(*p_sig)->info);
4493
4494 free (*p_sig);
4495 *p_sig = NULL;
4496 }
4497
4498 if (debug_threads)
4499 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4500 lwpid_of (thread), step ? "step" : "continue", signal,
4501 lwp->stop_expected ? "expected" : "not expected");
4502
4503 if (the_low_target.prepare_to_resume != NULL)
4504 the_low_target.prepare_to_resume (lwp);
4505
4506 regcache_invalidate_thread (thread);
4507 errno = 0;
4508 lwp->stepping = step;
4509 if (step)
4510 ptrace_request = PTRACE_SINGLESTEP;
4511 else if (gdb_catching_syscalls_p (lwp))
4512 ptrace_request = PTRACE_SYSCALL;
4513 else
4514 ptrace_request = PTRACE_CONT;
4515 ptrace (ptrace_request,
4516 lwpid_of (thread),
4517 (PTRACE_TYPE_ARG3) 0,
4518 /* Coerce to a uintptr_t first to avoid potential gcc warning
4519 of coercing an 8 byte integer to a 4 byte pointer. */
4520 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4521
4522 current_thread = saved_thread;
4523 if (errno)
4524 perror_with_name ("resuming thread");
4525
4526 /* Successfully resumed. Clear state that no longer makes sense,
4527 and mark the LWP as running. Must not do this before resuming
4528 otherwise if that fails other code will be confused. E.g., we'd
4529 later try to stop the LWP and hang forever waiting for a stop
4530 status. Note that we must not throw after this is cleared,
4531 otherwise handle_zombie_lwp_error would get confused. */
4532 lwp->stopped = 0;
4533 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4534}
4535
4536/* Called when we try to resume a stopped LWP and that errors out. If
4537 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4538 or about to become), discard the error, clear any pending status
4539 the LWP may have, and return true (we'll collect the exit status
4540 soon enough). Otherwise, return false. */
4541
4542static int
4543check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4544{
4545 struct thread_info *thread = get_lwp_thread (lp);
4546
4547 /* If we get an error after resuming the LWP successfully, we'd
4548 confuse !T state for the LWP being gone. */
4549 gdb_assert (lp->stopped);
4550
4551 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4552 because even if ptrace failed with ESRCH, the tracee may be "not
4553 yet fully dead", but already refusing ptrace requests. In that
4554 case the tracee has 'R (Running)' state for a little bit
4555 (observed in Linux 3.18). See also the note on ESRCH in the
4556 ptrace(2) man page. Instead, check whether the LWP has any state
4557 other than ptrace-stopped. */
4558
4559 /* Don't assume anything if /proc/PID/status can't be read. */
4560 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4561 {
4562 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4563 lp->status_pending_p = 0;
4564 return 1;
4565 }
4566 return 0;
4567}
4568
4569/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4570 disappears while we try to resume it. */
4571
4572static void
4573linux_resume_one_lwp (struct lwp_info *lwp,
4574 int step, int signal, siginfo_t *info)
4575{
4576 TRY
4577 {
4578 linux_resume_one_lwp_throw (lwp, step, signal, info);
4579 }
4580 CATCH (ex, RETURN_MASK_ERROR)
4581 {
4582 if (!check_ptrace_stopped_lwp_gone (lwp))
4583 throw_exception (ex);
4584 }
4585 END_CATCH
4586}
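/* For illustration, the wrapper above implements the usual "resume
 may race with exit" pattern (do_ptrace_thing is a hypothetical
 stand-in for any ptrace-based operation):

 TRY
 {
 do_ptrace_thing (lwp); // may fail with ESRCH
 }
 CATCH (ex, RETURN_MASK_ERROR)
 {
 if (!check_ptrace_stopped_lwp_gone (lwp))
 throw_exception (ex); // a real error; re-throw
 // else the LWP is (becoming) a zombie: swallow the error and
 // collect its exit status later.
 }
 END_CATCH */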
4587
4588struct thread_resume_array
4589{
4590 struct thread_resume *resume;
4591 size_t n;
4592};
4593
4594/* This function is called once per thread via find_inferior.
4595 ARG is a pointer to a thread_resume_array struct.
4596 We look up THREAD in ARG, and mark the thread
4597 with a pointer to the appropriate resume request.
4598
4599 This algorithm is O(threads * resume elements), but resume elements
4600 is small (and will remain small at least until GDB supports thread
4601 suspension). */
4602
4603static int
4604linux_set_resume_request (thread_info *thread, void *arg)
4605{
4606 struct lwp_info *lwp = get_thread_lwp (thread);
4607 int ndx;
4608 struct thread_resume_array *r;
4609
4610 r = (struct thread_resume_array *) arg;
4611
4612 for (ndx = 0; ndx < r->n; ndx++)
4613 {
4614 ptid_t ptid = r->resume[ndx].thread;
4615 if (ptid_equal (ptid, minus_one_ptid)
4616 || ptid == thread->id
4617 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4618 of PID'. */
4619 || (ptid_get_pid (ptid) == pid_of (thread)
4620 && (ptid_is_pid (ptid)
4621 || ptid_get_lwp (ptid) == -1)))
4622 {
4623 if (r->resume[ndx].kind == resume_stop
4624 && thread->last_resume_kind == resume_stop)
4625 {
4626 if (debug_threads)
4627 debug_printf ("already %s LWP %ld at GDB's request\n",
4628 (thread->last_status.kind
4629 == TARGET_WAITKIND_STOPPED)
4630 ? "stopped"
4631 : "stopping",
4632 lwpid_of (thread));
4633
4634 continue;
4635 }
4636
4637 /* Ignore (wildcard) resume requests for already-resumed
4638 threads. */
4639 if (r->resume[ndx].kind != resume_stop
4640 && thread->last_resume_kind != resume_stop)
4641 {
4642 if (debug_threads)
4643 debug_printf ("already %s LWP %ld at GDB's request\n",
4644 (thread->last_resume_kind
4645 == resume_step)
4646 ? "stepping"
4647 : "continuing",
4648 lwpid_of (thread));
4649 continue;
4650 }
4651
4652 /* Don't let wildcard resumes resume fork children that GDB
4653 does not yet know are new fork children. */
4654 if (lwp->fork_relative != NULL)
4655 {
4656 struct lwp_info *rel = lwp->fork_relative;
4657
4658 if (rel->status_pending_p
4659 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4660 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4661 {
4662 if (debug_threads)
4663 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4664 lwpid_of (thread));
4665 continue;
4666 }
4667 }
4668
4669 /* If the thread has a pending event that has already been
4670 reported to GDBserver core, but GDB has not pulled the
4671 event out of the vStopped queue yet, likewise, ignore the
4672 (wildcard) resume request. */
4673 if (in_queued_stop_replies (thread->id))
4674 {
4675 if (debug_threads)
4676 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4677 lwpid_of (thread));
4678 continue;
4679 }
4680
4681 lwp->resume = &r->resume[ndx];
4682 thread->last_resume_kind = lwp->resume->kind;
4683
4684 lwp->step_range_start = lwp->resume->step_range_start;
4685 lwp->step_range_end = lwp->resume->step_range_end;
4686
4687 /* If we had a deferred signal to report, dequeue one now.
4688 This can happen if LWP gets more than one signal while
4689 trying to get out of a jump pad. */
4690 if (lwp->stopped
4691 && !lwp->status_pending_p
4692 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4693 {
4694 lwp->status_pending_p = 1;
4695
4696 if (debug_threads)
4697 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4698 "leaving status pending.\n",
4699 WSTOPSIG (lwp->status_pending),
4700 lwpid_of (thread));
4701 }
4702
4703 return 0;
4704 }
4705 }
4706
4707 /* No resume action for this thread. */
4708 lwp->resume = NULL;
4709
4710 return 0;
4711}
4712
4713/* find_inferior callback for linux_resume.
4714 Set *FLAG_P if this lwp has an interesting status pending. */
4715
4716static int
4717resume_status_pending_p (thread_info *thread, void *flag_p)
4718{
4719 struct lwp_info *lwp = get_thread_lwp (thread);
4720
4721 /* LWPs which will not be resumed are not interesting, because
4722 we might not wait for them next time through linux_wait. */
4723 if (lwp->resume == NULL)
4724 return 0;
4725
4726 if (thread_still_has_status_pending_p (thread))
4727 * (int *) flag_p = 1;
4728
4729 return 0;
4730}
4731
4732/* Return 1 if this lwp that GDB wants running is stopped at an
4733 internal breakpoint that we need to step over. It assumes that any
4734 required STOP_PC adjustment has already been propagated to the
4735 inferior's regcache. */
4736
4737static int
4738need_step_over_p (thread_info *thread, void *dummy)
4739{
4740 struct lwp_info *lwp = get_thread_lwp (thread);
4741 struct thread_info *saved_thread;
4742 CORE_ADDR pc;
4743 struct process_info *proc = get_thread_process (thread);
4744
4745 /* GDBserver is skipping the extra traps from the wrapper program;
4746 no step-over is needed. */
4747 if (proc->tdesc == NULL)
4748 return 0;
4749
4750 /* LWPs which will not be resumed are not interesting, because we
4751 might not wait for them next time through linux_wait. */
4752
4753 if (!lwp->stopped)
4754 {
4755 if (debug_threads)
4756 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4757 lwpid_of (thread));
4758 return 0;
4759 }
4760
4761 if (thread->last_resume_kind == resume_stop)
4762 {
4763 if (debug_threads)
4764 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4765 " stopped\n",
4766 lwpid_of (thread));
4767 return 0;
4768 }
4769
4770 gdb_assert (lwp->suspended >= 0);
4771
4772 if (lwp->suspended)
4773 {
4774 if (debug_threads)
4775 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4776 lwpid_of (thread));
4777 return 0;
4778 }
4779
4780 if (lwp->status_pending_p)
4781 {
4782 if (debug_threads)
4783 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4784 " status.\n",
4785 lwpid_of (thread));
4786 return 0;
4787 }
4788
4789 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4790 or we have. */
4791 pc = get_pc (lwp);
4792
4793 /* If the PC has changed since we stopped, then don't do anything,
4794 and let the breakpoint/tracepoint be hit. This happens if, for
4795 instance, GDB handled the decr_pc_after_break subtraction itself,
4796 GDB is OOL stepping this thread, or the user has issued a "jump"
4797 command, or poked thread's registers herself. */
4798 if (pc != lwp->stop_pc)
4799 {
4800 if (debug_threads)
4801 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4802 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4803 lwpid_of (thread),
4804 paddress (lwp->stop_pc), paddress (pc));
4805 return 0;
4806 }
4807
4808 /* On software single step target, resume the inferior with signal
4809 rather than stepping over. */
4810 if (can_software_single_step ()
4811 && lwp->pending_signals != NULL
4812 && lwp_signal_can_be_delivered (lwp))
4813 {
4814 if (debug_threads)
4815 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4816 " signals.\n",
4817 lwpid_of (thread));
4818
4819 return 0;
4820 }
4821
4822 saved_thread = current_thread;
4823 current_thread = thread;
4824
4825 /* We can only step over breakpoints we know about. */
4826 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4827 {
4828 /* Don't step over a breakpoint that GDB expects to hit
4829 though. If the condition is being evaluated on the target's side
4830 and it evaluates to false, step over this breakpoint as well. */
4831 if (gdb_breakpoint_here (pc)
4832 && gdb_condition_true_at_breakpoint (pc)
4833 && gdb_no_commands_at_breakpoint (pc))
4834 {
4835 if (debug_threads)
4836 debug_printf ("Need step over [LWP %ld]? yes, but found"
4837 " GDB breakpoint at 0x%s; skipping step over\n",
4838 lwpid_of (thread), paddress (pc));
4839
4840 current_thread = saved_thread;
4841 return 0;
4842 }
4843 else
4844 {
4845 if (debug_threads)
4846 debug_printf ("Need step over [LWP %ld]? yes, "
4847 "found breakpoint at 0x%s\n",
4848 lwpid_of (thread), paddress (pc));
4849
4850 /* We've found an lwp that needs stepping over --- return 1 so
4851 that find_inferior stops looking. */
4852 current_thread = saved_thread;
4853
4854 return 1;
4855 }
4856 }
4857
4858 current_thread = saved_thread;
4859
4860 if (debug_threads)
4861 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4862 " at 0x%s\n",
4863 lwpid_of (thread), paddress (pc));
4864
4865 return 0;
4866}
4867
4868/* Start a step-over operation on LWP. When LWP stopped at a
4869 breakpoint, to make progress, we need to move the breakpoint out
4870 of the way. If we let other threads run while we do that, they may
4871 pass by the breakpoint location and miss hitting it. To avoid
4872 that, a step-over momentarily stops all threads while LWP is
4873 single-stepped by either hardware or software while the breakpoint
4874 is temporarily uninserted from the inferior. When the single-step
4875 finishes, we reinsert the breakpoint, and let all threads that are
4876 supposed to be running, run again. */
4877
4878static int
4879start_step_over (struct lwp_info *lwp)
4880{
4881 struct thread_info *thread = get_lwp_thread (lwp);
4882 struct thread_info *saved_thread;
4883 CORE_ADDR pc;
4884 int step;
4885
4886 if (debug_threads)
4887 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4888 lwpid_of (thread));
4889
4890 stop_all_lwps (1, lwp);
4891
4892 if (lwp->suspended != 0)
4893 {
4894 internal_error (__FILE__, __LINE__,
4895 "LWP %ld suspended=%d\n", lwpid_of (thread),
4896 lwp->suspended);
4897 }
4898
4899 if (debug_threads)
4900 debug_printf ("Done stopping all threads for step-over.\n");
4901
4902 /* Note, we should always reach here with an already adjusted PC,
4903 either by GDB (if we're resuming due to GDB's request), or by our
4904 caller, if we just finished handling an internal breakpoint GDB
4905 shouldn't care about. */
4906 pc = get_pc (lwp);
4907
4908 saved_thread = current_thread;
4909 current_thread = thread;
4910
4911 lwp->bp_reinsert = pc;
4912 uninsert_breakpoints_at (pc);
4913 uninsert_fast_tracepoint_jumps_at (pc);
4914
4915 step = single_step (lwp);
4916
4917 current_thread = saved_thread;
4918
4919 linux_resume_one_lwp (lwp, step, 0, NULL);
4920
4921 /* Require next event from this LWP. */
4922 step_over_bkpt = thread->id;
4923 return 1;
4924}
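/* For illustration, the full step-over life cycle driven by the
 functions above and below (a rough sketch, not literal code):

 start_step_over (lwp); // stop world, uninsert breakpoint at PC,
 // single-step LWP, set step_over_bkpt
 // ... wait until the step finishes ...
 finish_step_over (lwp); // reinsert breakpoint, delete single-step
 // breakpoints, clear step_over_bkpt
 unsuspend_all_lwps (lwp); // undo the suspend counts
 proceed_all_lwps (); // set everyone running again */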
4925
4926/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4927 start_step_over, if still there, and delete any single-step
4928 breakpoints we've set, on non hardware single-step targets. */
4929
4930static int
4931finish_step_over (struct lwp_info *lwp)
4932{
4933 if (lwp->bp_reinsert != 0)
4934 {
4935 struct thread_info *saved_thread = current_thread;
4936
4937 if (debug_threads)
4938 debug_printf ("Finished step over.\n");
4939
4940 current_thread = get_lwp_thread (lwp);
4941
4942 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4943 may be no breakpoint to reinsert there by now. */
4944 reinsert_breakpoints_at (lwp->bp_reinsert);
4945 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4946
4947 lwp->bp_reinsert = 0;
4948
4949 /* Delete any single-step breakpoints. No longer needed. We
4950 don't have to worry about other threads hitting this trap,
4951 and later not being able to explain it, because we were
4952 stepping over a breakpoint, and we hold all threads but
4953 LWP stopped while doing that. */
4954 if (!can_hardware_single_step ())
4955 {
4956 gdb_assert (has_single_step_breakpoints (current_thread));
4957 delete_single_step_breakpoints (current_thread);
4958 }
4959
4960 step_over_bkpt = null_ptid;
4961 current_thread = saved_thread;
4962 return 1;
4963 }
4964 else
4965 return 0;
4966}
4967
4968/* If there's a step over in progress, wait until all threads stop
4969 (that is, until the stepping thread finishes its step), and
4970 unsuspend all lwps. The stepping thread ends with its status
4971 pending, which is processed later when we get back to processing
4972 events. */
4973
4974static void
4975complete_ongoing_step_over (void)
4976{
4977 if (!ptid_equal (step_over_bkpt, null_ptid))
4978 {
4979 struct lwp_info *lwp;
4980 int wstat;
4981 int ret;
4982
4983 if (debug_threads)
4984 debug_printf ("detach: step over in progress, finish it first\n");
4985
4986 /* Passing NULL_PTID as filter indicates we want all events to
4987 be left pending. Eventually this returns when there are no
4988 unwaited-for children left. */
4989 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4990 &wstat, __WALL);
4991 gdb_assert (ret == -1);
4992
4993 lwp = find_lwp_pid (step_over_bkpt);
4994 if (lwp != NULL)
4995 finish_step_over (lwp);
4996 step_over_bkpt = null_ptid;
4997 unsuspend_all_lwps (lwp);
4998 }
4999}
5000
5001/* This function is called once per thread. We check the thread's resume
5002 request, which will tell us whether to resume, step, or leave the thread
5003 stopped; and what signal, if any, it should be sent.
5004
5005 For threads which we aren't explicitly told otherwise, we preserve
5006 the stepping flag; this is used for stepping over gdbserver-placed
5007 breakpoints.
5008
5009 If pending_flags was set in any thread, we queue any needed
5010 signals, since we won't actually resume. We already have a pending
5011 event to report, so we don't need to preserve any step requests;
5012 they should be re-issued if necessary. */
5013
5014static int
5015linux_resume_one_thread (thread_info *thread, void *arg)
5016{
5017 struct lwp_info *lwp = get_thread_lwp (thread);
5018 int leave_all_stopped = * (int *) arg;
5019 int leave_pending;
5020
5021 if (lwp->resume == NULL)
5022 return 0;
5023
5024 if (lwp->resume->kind == resume_stop)
5025 {
5026 if (debug_threads)
5027 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
5028
5029 if (!lwp->stopped)
5030 {
5031 if (debug_threads)
5032 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
5033
5034 /* Stop the thread, and wait for the event asynchronously,
5035 through the event loop. */
5036 send_sigstop (lwp);
5037 }
5038 else
5039 {
5040 if (debug_threads)
5041 debug_printf ("already stopped LWP %ld\n",
5042 lwpid_of (thread));
5043
5044 /* The LWP may have been stopped in an internal event that
5045 was not meant to be notified back to GDB (e.g., gdbserver
5046 breakpoint), so we should be reporting a stop event in
5047 this case too. */
5048
5049 /* If the thread already has a pending SIGSTOP, this is a
5050 no-op. Otherwise, something later will presumably resume
5051 the thread and this will cause it to cancel any pending
5052 operation, due to last_resume_kind == resume_stop. If
5053 the thread already has a pending status to report, we
5054 will still report it the next time we wait - see
5055 status_pending_p_callback. */
5056
5057 /* If we already have a pending signal to report, then
5058 there's no need to queue a SIGSTOP, as this means we're
5059 midway through moving the LWP out of the jumppad, and we
5060 will report the pending signal as soon as that is
5061 finished. */
5062 if (lwp->pending_signals_to_report == NULL)
5063 send_sigstop (lwp);
5064 }
5065
5066 /* For stop requests, we're done. */
5067 lwp->resume = NULL;
5068 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5069 return 0;
5070 }
5071
5072 /* If this thread which is about to be resumed has a pending status,
5073 then don't resume it - we can just report the pending status.
5074 Likewise if it is suspended, because e.g., another thread is
5075 stepping past a breakpoint. Make sure to queue any signals that
5076 would otherwise be sent. In all-stop mode, we make this decision
5077 based on whether *any* thread has a pending status. If there's a
5078 thread that needs the step-over-breakpoint dance, then don't
5079 resume any other thread but that particular one. */
5080 leave_pending = (lwp->suspended
5081 || lwp->status_pending_p
5082 || leave_all_stopped);
5083
5084 /* If we have a new signal, enqueue the signal. */
5085 if (lwp->resume->sig != 0)
5086 {
5087 siginfo_t info, *info_p;
5088
5089 /* If this is the same signal we were previously stopped by,
5090 make sure to queue its siginfo. */
5091 if (WIFSTOPPED (lwp->last_status)
5092 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5093 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5094 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5095 info_p = &info;
5096 else
5097 info_p = NULL;
5098
5099 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5100 }
5101
5102 if (!leave_pending)
5103 {
5104 if (debug_threads)
5105 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5106
5107 proceed_one_lwp (thread, NULL);
5108 }
5109 else
5110 {
5111 if (debug_threads)
5112 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5113 }
5114
5115 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5116 lwp->resume = NULL;
5117 return 0;
5118}
5119
5120static void
5121linux_resume (struct thread_resume *resume_info, size_t n)
5122{
5123 struct thread_resume_array array = { resume_info, n };
5124 struct thread_info *need_step_over = NULL;
5125 int any_pending;
5126 int leave_all_stopped;
5127
5128 if (debug_threads)
5129 {
5130 debug_enter ();
5131 debug_printf ("linux_resume:\n");
5132 }
5133
5134 find_inferior (&all_threads, linux_set_resume_request, &array);
5135
5136 /* If there is a thread which would otherwise be resumed, which has
5137 a pending status, then don't resume any threads - we can just
5138 report the pending status. Make sure to queue any signals that
5139 would otherwise be sent. In non-stop mode, we'll apply this
5140 logic to each thread individually. We consume all pending events
5141 before considering whether to start a step-over (in all-stop). */
5142 any_pending = 0;
5143 if (!non_stop)
5144 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5145
5146 /* If there is a thread which would otherwise be resumed, which is
5147 stopped at a breakpoint that needs stepping over, then don't
5148 resume any threads - have it step over the breakpoint with all
5149 other threads stopped, then resume all threads again. Make sure
5150 to queue any signals that would otherwise be delivered or
5151 queued. */
5152 if (!any_pending && supports_breakpoints ())
5153 need_step_over
5154 = (struct thread_info *) find_inferior (&all_threads,
5155 need_step_over_p, NULL);
5156
5157 leave_all_stopped = (need_step_over != NULL || any_pending);
5158
5159 if (debug_threads)
5160 {
5161 if (need_step_over != NULL)
5162 debug_printf ("Not resuming all, need step over\n");
5163 else if (any_pending)
5164 debug_printf ("Not resuming, all-stop and found "
5165 "an LWP with pending status\n");
5166 else
5167 debug_printf ("Resuming, no pending status or step over needed\n");
5168 }
5169
5170 /* Even if we're leaving threads stopped, queue all signals we'd
5171 otherwise deliver. */
5172 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5173
5174 if (need_step_over)
5175 start_step_over (get_thread_lwp (need_step_over));
5176
5177 if (debug_threads)
5178 {
5179 debug_printf ("linux_resume done\n");
5180 debug_exit ();
5181 }
5182
5183 /* We may have events that were pending that can/should be sent to
5184 the client now. Trigger a linux_wait call. */
5185 if (target_is_async_p ())
5186 async_file_mark ();
5187}
5188
5189/* This function is called once per thread. We check the thread's
5190 last resume request, which will tell us whether to resume, step, or
5191 leave the thread stopped. Any signal the client requested to be
5192 delivered has already been enqueued at this point.
5193
5194 If any thread that GDB wants running is stopped at an internal
5195 breakpoint that needs stepping over, we start a step-over operation
5196 on that particular thread, and leave all others stopped. */
5197
5198static int
5199proceed_one_lwp (thread_info *thread, void *except)
5200{
5201 struct lwp_info *lwp = get_thread_lwp (thread);
5202 int step;
5203
5204 if (lwp == except)
5205 return 0;
5206
5207 if (debug_threads)
5208 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5209
5210 if (!lwp->stopped)
5211 {
5212 if (debug_threads)
5213 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5214 return 0;
5215 }
5216
5217 if (thread->last_resume_kind == resume_stop
5218 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5219 {
5220 if (debug_threads)
5221 debug_printf (" client wants LWP %ld to remain stopped\n",
5222 lwpid_of (thread));
5223 return 0;
5224 }
5225
5226 if (lwp->status_pending_p)
5227 {
5228 if (debug_threads)
5229 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5230 lwpid_of (thread));
5231 return 0;
5232 }
5233
5234 gdb_assert (lwp->suspended >= 0);
5235
5236 if (lwp->suspended)
5237 {
5238 if (debug_threads)
5239 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5240 return 0;
5241 }
5242
5243 if (thread->last_resume_kind == resume_stop
5244 && lwp->pending_signals_to_report == NULL
5245 && (lwp->collecting_fast_tracepoint
5246 == fast_tpoint_collect_result::not_collecting))
5247 {
5248 /* We haven't reported this LWP as stopped yet (otherwise, the
5249 last_status.kind check above would catch it, and we wouldn't
5250 reach here). This LWP may have been momentarily paused by a
5251 stop_all_lwps call while handling for example, another LWP's
5252 step-over. In that case, the pending expected SIGSTOP signal
5253 that was queued at vCont;t handling time will have already
5254 been consumed by wait_for_sigstop, and so we need to requeue
5255 another one here. Note that if the LWP already has a SIGSTOP
5256 pending, this is a no-op. */
5257
5258 if (debug_threads)
5259 debug_printf ("Client wants LWP %ld to stop. "
5260 "Making sure it has a SIGSTOP pending\n",
5261 lwpid_of (thread));
5262
5263 send_sigstop (lwp);
5264 }
5265
5266 if (thread->last_resume_kind == resume_step)
5267 {
5268 if (debug_threads)
5269 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5270 lwpid_of (thread));
5271
5272 /* If resume_step is requested by GDB, install single-step
5273 breakpoints when the thread is about to be actually resumed if
5274 the single-step breakpoints weren't removed. */
5275 if (can_software_single_step ()
5276 && !has_single_step_breakpoints (thread))
5277 install_software_single_step_breakpoints (lwp);
5278
5279 step = maybe_hw_step (thread);
5280 }
5281 else if (lwp->bp_reinsert != 0)
5282 {
5283 if (debug_threads)
5284 debug_printf (" stepping LWP %ld, reinsert set\n",
5285 lwpid_of (thread));
5286
5287 step = maybe_hw_step (thread);
5288 }
5289 else
5290 step = 0;
5291
5292 linux_resume_one_lwp (lwp, step, 0, NULL);
5293 return 0;
5294}
5295
5296static int
5297unsuspend_and_proceed_one_lwp (thread_info *thread, void *except)
5298{
5299 struct lwp_info *lwp = get_thread_lwp (thread);
5300
5301 if (lwp == except)
5302 return 0;
5303
5304 lwp_suspended_decr (lwp);
5305
5306 return proceed_one_lwp (thread, except);
5307}
5308
5309/* When we finish a step-over, set threads running again. If there's
5310 another thread that may need a step-over, now's the time to start
5311 it. Eventually, we'll move all threads past their breakpoints. */
5312
5313static void
5314proceed_all_lwps (void)
5315{
5316 struct thread_info *need_step_over;
5317
5318 /* If there is a thread which would otherwise be resumed, which is
5319 stopped at a breakpoint that needs stepping over, then don't
5320 resume any threads - have it step over the breakpoint with all
5321 other threads stopped, then resume all threads again. */
5322
5323 if (supports_breakpoints ())
5324 {
5325 need_step_over
5326 = (struct thread_info *) find_inferior (&all_threads,
5327 need_step_over_p, NULL);
5328
5329 if (need_step_over != NULL)
5330 {
5331 if (debug_threads)
5332 debug_printf ("proceed_all_lwps: found "
5333 "thread %ld needing a step-over\n",
5334 lwpid_of (need_step_over));
5335
5336 start_step_over (get_thread_lwp (need_step_over));
5337 return;
5338 }
5339 }
5340
5341 if (debug_threads)
5342 debug_printf ("Proceeding, no step-over needed\n");
5343
5344 find_inferior (&all_threads, proceed_one_lwp, NULL);
5345}
5346
5347/* Stopped LWPs that the client wanted to be running, that don't have
5348 pending statuses, are set to run again, except for EXCEPT, if not
5349 NULL. This undoes a stop_all_lwps call. */
5350
5351static void
5352unstop_all_lwps (int unsuspend, struct lwp_info *except)
5353{
5354 if (debug_threads)
5355 {
5356 debug_enter ();
5357 if (except)
5358 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5359 lwpid_of (get_lwp_thread (except)));
5360 else
5361 debug_printf ("unstopping all lwps\n");
5362 }
5363
5364 if (unsuspend)
5365 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5366 else
5367 find_inferior (&all_threads, proceed_one_lwp, except);
5368
5369 if (debug_threads)
5370 {
5371 debug_printf ("unstop_all_lwps done\n");
5372 debug_exit ();
5373 }
5374}
5375
5376
5377#ifdef HAVE_LINUX_REGSETS
5378
5379#define use_linux_regsets 1
5380
5381/* Returns true if REGSET has been disabled. */
5382
5383static int
5384regset_disabled (struct regsets_info *info, struct regset_info *regset)
5385{
5386 return (info->disabled_regsets != NULL
5387 && info->disabled_regsets[regset - info->regsets]);
5388}
5389
5390/* Disable REGSET. */
5391
5392static void
5393disable_regset (struct regsets_info *info, struct regset_info *regset)
5394{
5395 int dr_offset;
5396
5397 dr_offset = regset - info->regsets;
5398 if (info->disabled_regsets == NULL)
5399 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5400 info->disabled_regsets[dr_offset] = 1;
5401}
5402
5403static int
5404regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5405 struct regcache *regcache)
5406{
5407 struct regset_info *regset;
5408 int saw_general_regs = 0;
5409 int pid;
5410 struct iovec iov;
5411
5412 pid = lwpid_of (current_thread);
5413 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5414 {
5415 void *buf, *data;
5416 int nt_type, res;
5417
5418 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5419 continue;
5420
5421 buf = xmalloc (regset->size);
5422
5423 nt_type = regset->nt_type;
5424 if (nt_type)
5425 {
5426 iov.iov_base = buf;
5427 iov.iov_len = regset->size;
5428 data = (void *) &iov;
5429 }
5430 else
5431 data = buf;
5432
5433#ifndef __sparc__
5434 res = ptrace (regset->get_request, pid,
5435 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5436#else
5437 res = ptrace (regset->get_request, pid, data, nt_type);
5438#endif
5439 if (res < 0)
5440 {
5441 if (errno == EIO)
5442 {
5443 /* If we get EIO on a regset, do not try it again for
5444 this process mode. */
5445 disable_regset (regsets_info, regset);
5446 }
5447 else if (errno == ENODATA)
5448 {
5449 /* ENODATA may be returned if the regset is currently
5450 not "active". This can happen in normal operation,
5451 so suppress the warning in this case. */
5452 }
5453 else if (errno == ESRCH)
5454 {
5455 /* At this point, ESRCH should mean the process is
5456 already gone, in which case we simply ignore attempts
5457 to read its registers. */
5458 }
5459 else
5460 {
5461 char s[256];
5462 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5463 pid);
5464 perror (s);
5465 }
5466 }
5467 else
5468 {
5469 if (regset->type == GENERAL_REGS)
5470 saw_general_regs = 1;
5471 regset->store_function (regcache, buf);
5472 }
5473 free (buf);
5474 }
5475 if (saw_general_regs)
5476 return 0;
5477 else
5478 return 1;
5479}
5480
5481static int
5482regsets_store_inferior_registers (struct regsets_info *regsets_info,
5483 struct regcache *regcache)
5484{
5485 struct regset_info *regset;
5486 int saw_general_regs = 0;
5487 int pid;
5488 struct iovec iov;
5489
5490 pid = lwpid_of (current_thread);
5491 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5492 {
5493 void *buf, *data;
5494 int nt_type, res;
5495
5496 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5497 || regset->fill_function == NULL)
5498 continue;
5499
5500 buf = xmalloc (regset->size);
5501
5502 /* First fill the buffer with the current register set contents,
5503 in case there are any items in the kernel's regset that are
5504 not in gdbserver's regcache. */
5505
5506 nt_type = regset->nt_type;
5507 if (nt_type)
5508 {
5509 iov.iov_base = buf;
5510 iov.iov_len = regset->size;
5511 data = (void *) &iov;
5512 }
5513 else
5514 data = buf;
5515
5516#ifndef __sparc__
5517 res = ptrace (regset->get_request, pid,
5518 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5519#else
5520 res = ptrace (regset->get_request, pid, data, nt_type);
5521#endif
5522
5523 if (res == 0)
5524 {
5525 /* Then overlay our cached registers on that. */
5526 regset->fill_function (regcache, buf);
5527
5528 /* Only now do we write the register set. */
5529#ifndef __sparc__
5530 res = ptrace (regset->set_request, pid,
5531 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5532#else
5533 res = ptrace (regset->set_request, pid, data, nt_type);
5534#endif
5535 }
5536
5537 if (res < 0)
5538 {
5539 if (errno == EIO)
5540 {
5541 /* If we get EIO on a regset, do not try it again for
5542 this process mode. */
5543 disable_regset (regsets_info, regset);
5544 }
5545 else if (errno == ESRCH)
5546 {
5547 /* At this point, ESRCH should mean the process is
5548 already gone, in which case we simply ignore attempts
5549 to change its registers. See also the related
5550 comment in linux_resume_one_lwp. */
5551 free (buf);
5552 return 0;
5553 }
5554 else
5555 {
5556 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5557 }
5558 }
5559 else if (regset->type == GENERAL_REGS)
5560 saw_general_regs = 1;
5561 free (buf);
5562 }
5563 if (saw_general_regs)
5564 return 0;
5565 else
5566 return 1;
5567}
5568
5569#else /* !HAVE_LINUX_REGSETS */
5570
5571#define use_linux_regsets 0
5572#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5573#define regsets_store_inferior_registers(regsets_info, regcache) 1
5574
5575#endif
5576
5577/* Return 1 if register REGNO is supported by one of the regset ptrace
5578 calls or 0 if it has to be transferred individually. */
5579
5580static int
5581linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5582{
5583 unsigned char mask = 1 << (regno % 8);
5584 size_t index = regno / 8;
5585
5586 return (use_linux_regsets
5587 && (regs_info->regset_bitmap == NULL
5588 || (regs_info->regset_bitmap[index] & mask) != 0));
5589}
5590
5591#ifdef HAVE_LINUX_USRREGS
5592
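/* Return the offset of register REGNUM in the USER area, as recorded
   in USRREGS's regmap.  Error out on an invalid register number.  */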
5593static int
5594register_addr (const struct usrregs_info *usrregs, int regnum)
5595{
5596 int addr;
5597
5598 if (regnum < 0 || regnum >= usrregs->num_regs)
5599 error ("Invalid register number %d.", regnum);
5600
5601 addr = usrregs->regmap[regnum];
5602
5603 return addr;
5604}
5605
5606/* Fetch one register. */
5607static void
5608fetch_register (const struct usrregs_info *usrregs,
5609 struct regcache *regcache, int regno)
5610{
5611 CORE_ADDR regaddr;
5612 int i, size;
5613 char *buf;
5614 int pid;
5615
5616 if (regno >= usrregs->num_regs)
5617 return;
5618 if ((*the_low_target.cannot_fetch_register) (regno))
5619 return;
5620
5621 regaddr = register_addr (usrregs, regno);
5622 if (regaddr == -1)
5623 return;
5624
5625 size = ((register_size (regcache->tdesc, regno)
5626 + sizeof (PTRACE_XFER_TYPE) - 1)
5627 & -sizeof (PTRACE_XFER_TYPE));
5628 buf = (char *) alloca (size);
5629
5630 pid = lwpid_of (current_thread);
5631 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5632 {
5633 errno = 0;
5634 *(PTRACE_XFER_TYPE *) (buf + i) =
5635 ptrace (PTRACE_PEEKUSER, pid,
5636 /* Coerce to a uintptr_t first to avoid potential gcc warning
5637 of coercing an 8 byte integer to a 4 byte pointer. */
5638 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5639 regaddr += sizeof (PTRACE_XFER_TYPE);
5640 if (errno != 0)
5641 error ("reading register %d: %s", regno, strerror (errno));
5642 }
5643
5644 if (the_low_target.supply_ptrace_register)
5645 the_low_target.supply_ptrace_register (regcache, regno, buf);
5646 else
5647 supply_register (regcache, regno, buf);
5648}
5649
5650/* Store one register. */
5651static void
5652store_register (const struct usrregs_info *usrregs,
5653 struct regcache *regcache, int regno)
5654{
5655 CORE_ADDR regaddr;
5656 int i, size;
5657 char *buf;
5658 int pid;
5659
5660 if (regno >= usrregs->num_regs)
5661 return;
5662 if ((*the_low_target.cannot_store_register) (regno))
5663 return;
5664
5665 regaddr = register_addr (usrregs, regno);
5666 if (regaddr == -1)
5667 return;
5668
5669 size = ((register_size (regcache->tdesc, regno)
5670 + sizeof (PTRACE_XFER_TYPE) - 1)
5671 & -sizeof (PTRACE_XFER_TYPE));
5672 buf = (char *) alloca (size);
5673 memset (buf, 0, size);
5674
5675 if (the_low_target.collect_ptrace_register)
5676 the_low_target.collect_ptrace_register (regcache, regno, buf);
5677 else
5678 collect_register (regcache, regno, buf);
5679
5680 pid = lwpid_of (current_thread);
5681 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5682 {
5683 errno = 0;
5684 ptrace (PTRACE_POKEUSER, pid,
5685 /* Coerce to a uintptr_t first to avoid potential gcc warning
5686 about coercing an 8 byte integer to a 4 byte pointer. */
5687 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5688 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5689 if (errno != 0)
5690 {
5691 /* At this point, ESRCH should mean the process is
5692 already gone, in which case we simply ignore attempts
5693 to change its registers. See also the related
5694 comment in linux_resume_one_lwp. */
5695 if (errno == ESRCH)
5696 return;
5697
5698 if ((*the_low_target.cannot_store_register) (regno) == 0)
5699 error ("writing register %d: %s", regno, strerror (errno));
5700 }
5701 regaddr += sizeof (PTRACE_XFER_TYPE);
5702 }
5703}
5704
5705/* Fetch all registers, or just one, from the child process.
5706 If REGNO is -1, do this for all registers, skipping any that are
5707 assumed to have been retrieved by regsets_fetch_inferior_registers,
5708 unless ALL is non-zero.
5709 Otherwise, REGNO specifies which register (so we can save time). */
5710static void
5711usr_fetch_inferior_registers (const struct regs_info *regs_info,
5712 struct regcache *regcache, int regno, int all)
5713{
5714 struct usrregs_info *usr = regs_info->usrregs;
5715
5716 if (regno == -1)
5717 {
5718 for (regno = 0; regno < usr->num_regs; regno++)
5719 if (all || !linux_register_in_regsets (regs_info, regno))
5720 fetch_register (usr, regcache, regno);
5721 }
5722 else
5723 fetch_register (usr, regcache, regno);
5724}
5725
5726/* Store our register values back into the inferior.
5727 If REGNO is -1, do this for all registers, skipping any that are
5728 assumed to have been saved by regsets_store_inferior_registers,
5729 unless ALL is non-zero.
5730 Otherwise, REGNO specifies which register (so we can save time). */
5731static void
5732usr_store_inferior_registers (const struct regs_info *regs_info,
5733 struct regcache *regcache, int regno, int all)
5734{
5735 struct usrregs_info *usr = regs_info->usrregs;
5736
5737 if (regno == -1)
5738 {
5739 for (regno = 0; regno < usr->num_regs; regno++)
5740 if (all || !linux_register_in_regsets (regs_info, regno))
5741 store_register (usr, regcache, regno);
5742 }
5743 else
5744 store_register (usr, regcache, regno);
5745}
5746
5747#else /* !HAVE_LINUX_USRREGS */
5748
5749#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5750#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5751
5752#endif
5753
5754
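/* Fetch registers from the inferior into REGCACHE.  REGNO of -1 means
   all registers.  The low target's fetch_register hook is tried first,
   then the regsets, then the USER area as a fallback.  */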
5755static void
5756linux_fetch_registers (struct regcache *regcache, int regno)
5757{
5758 int use_regsets;
5759 int all = 0;
5760 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5761
5762 if (regno == -1)
5763 {
5764 if (the_low_target.fetch_register != NULL
5765 && regs_info->usrregs != NULL)
5766 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5767 (*the_low_target.fetch_register) (regcache, regno);
5768
5769 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5770 if (regs_info->usrregs != NULL)
5771 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5772 }
5773 else
5774 {
5775 if (the_low_target.fetch_register != NULL
5776 && (*the_low_target.fetch_register) (regcache, regno))
5777 return;
5778
5779 use_regsets = linux_register_in_regsets (regs_info, regno);
5780 if (use_regsets)
5781 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5782 regcache);
5783 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5784 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5785 }
5786}
5787
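/* Store registers from REGCACHE back into the inferior.  REGNO of -1
   means all registers, first via the regsets and then via the USER
   area for anything the regsets did not cover.  */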
5788static void
5789linux_store_registers (struct regcache *regcache, int regno)
5790{
5791 int use_regsets;
5792 int all = 0;
5793 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5794
5795 if (regno == -1)
5796 {
5797 all = regsets_store_inferior_registers (regs_info->regsets_info,
5798 regcache);
5799 if (regs_info->usrregs != NULL)
5800 usr_store_inferior_registers (regs_info, regcache, regno, all);
5801 }
5802 else
5803 {
5804 use_regsets = linux_register_in_regsets (regs_info, regno);
5805 if (use_regsets)
5806 all = regsets_store_inferior_registers (regs_info->regsets_info,
5807 regcache);
5808 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5809 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5810 }
5811}
5812
5813
5814/* Copy LEN bytes from inferior's memory starting at MEMADDR to
5815 debugger memory starting at MYADDR.  Return 0 on success, else errno.  */
5816
5817static int
5818linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5819{
5820 int pid = lwpid_of (current_thread);
5821 PTRACE_XFER_TYPE *buffer;
5822 CORE_ADDR addr;
5823 int count;
5824 char filename[64];
5825 int i;
5826 int ret;
5827 int fd;
5828
5829 /* Try using /proc.  Not worth it for reads shorter than three words.  */
5830 if (len >= 3 * sizeof (long))
5831 {
5832 int bytes;
5833
5834 /* We could keep this file open and cache it - possibly one per
5835 thread. That requires some juggling, but is even faster. */
5836 sprintf (filename, "/proc/%d/mem", pid);
5837 fd = open (filename, O_RDONLY | O_LARGEFILE);
5838 if (fd == -1)
5839 goto no_proc;
5840
5841 /* If pread64 is available, use it. It's faster if the kernel
5842 supports it (only one syscall), and it's 64-bit safe even on
5843 32-bit platforms (for instance, SPARC debugging a SPARC64
5844 application). */
5845#ifdef HAVE_PREAD64
5846 bytes = pread64 (fd, myaddr, len, memaddr);
5847#else
5848 bytes = -1;
5849 if (lseek (fd, memaddr, SEEK_SET) != -1)
5850 bytes = read (fd, myaddr, len);
5851#endif
5852
5853 close (fd);
5854 if (bytes == len)
5855 return 0;
5856
5857 /* Some data was read, we'll try to get the rest with ptrace. */
5858 if (bytes > 0)
5859 {
5860 memaddr += bytes;
5861 myaddr += bytes;
5862 len -= bytes;
5863 }
5864 }
5865
5866 no_proc:
5867 /* Round starting address down to longword boundary. */
5868 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5869 /* Round ending address up; get number of longwords that makes. */
5870 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5871 / sizeof (PTRACE_XFER_TYPE));
5872 /* Allocate buffer of that many longwords. */
5873 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5874
5875 /* Read all the longwords.  */
5876 errno = 0;
5877 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5878 {
5879 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5880 about coercing an 8 byte integer to a 4 byte pointer. */
5881 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5882 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5883 (PTRACE_TYPE_ARG4) 0);
5884 if (errno)
5885 break;
5886 }
5887 ret = errno;
5888
5889 /* Copy appropriate bytes out of the buffer. */
5890 if (i > 0)
5891 {
5892 i *= sizeof (PTRACE_XFER_TYPE);
5893 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5894 memcpy (myaddr,
5895 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5896 i < len ? i : len);
5897 }
5898
5899 return ret;
5900}
5901
5902/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5903 memory at MEMADDR. On failure (cannot write to the inferior)
5904 returns the value of errno. Always succeeds if LEN is zero. */
5905
5906static int
5907linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5908{
5909 int i;
5910 /* Round starting address down to longword boundary. */
5911 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5912 /* Round ending address up; get number of longwords that makes. */
5913 int count
5914 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5915 / sizeof (PTRACE_XFER_TYPE);
5916
5917 /* Allocate buffer of that many longwords. */
5918 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5919
5920 int pid = lwpid_of (current_thread);
5921
5922 if (len == 0)
5923 {
5924 /* Zero length write always succeeds. */
5925 return 0;
5926 }
5927
5928 if (debug_threads)
5929 {
5930 /* Dump up to four bytes. */
5931 char str[4 * 2 + 1];
5932 char *p = str;
5933 int dump = len < 4 ? len : 4;
5934
5935 for (i = 0; i < dump; i++)
5936 {
5937 sprintf (p, "%02x", myaddr[i]);
5938 p += 2;
5939 }
5940 *p = '\0';
5941
5942 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5943 str, (long) memaddr, pid);
5944 }
5945
5946 /* Fill start and end extra bytes of buffer with existing memory data. */
5947
5948 errno = 0;
5949 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5950 about coercing an 8 byte integer to a 4 byte pointer. */
5951 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5952 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5953 (PTRACE_TYPE_ARG4) 0);
5954 if (errno)
5955 return errno;
5956
5957 if (count > 1)
5958 {
5959 errno = 0;
5960 buffer[count - 1]
5961 = ptrace (PTRACE_PEEKTEXT, pid,
5962 /* Coerce to a uintptr_t first to avoid potential gcc warning
5963 about coercing an 8 byte integer to a 4 byte pointer. */
5964 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5965 * sizeof (PTRACE_XFER_TYPE)),
5966 (PTRACE_TYPE_ARG4) 0);
5967 if (errno)
5968 return errno;
5969 }
5970
5971 /* Copy data to be written over corresponding part of buffer. */
5972
5973 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5974 myaddr, len);
5975
5976 /* Write the entire buffer. */
5977
5978 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5979 {
5980 errno = 0;
5981 ptrace (PTRACE_POKETEXT, pid,
5982 /* Coerce to a uintptr_t first to avoid potential gcc warning
5983 about coercing an 8 byte integer to a 4 byte pointer. */
5984 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5985 (PTRACE_TYPE_ARG4) buffer[i]);
5986 if (errno)
5987 return errno;
5988 }
5989
5990 return 0;
5991}
5992
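/* Target hook called once symbols may be looked up; use it to try to
   bring up libthread_db for the current process, if it is not already
   active.  */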
5993static void
5994linux_look_up_symbols (void)
5995{
5996#ifdef USE_THREAD_DB
5997 struct process_info *proc = current_process ();
5998
5999 if (proc->priv->thread_db != NULL)
6000 return;
6001
6002 thread_db_init ();
6003#endif
6004}
6005
6006static void
6007linux_request_interrupt (void)
6008{
6009 /* Send a SIGINT to the process group. This acts just like the user
6010 typed a ^C on the controlling terminal. */
6011 kill (-signal_pid, SIGINT);
6012}
6013
6014/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
6015 to debugger memory starting at MYADDR.  Return bytes read, or -1.  */
6016
6017static int
6018linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
6019{
6020 char filename[PATH_MAX];
6021 int fd, n;
6022 int pid = lwpid_of (current_thread);
6023
6024 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6025
6026 fd = open (filename, O_RDONLY);
6027 if (fd < 0)
6028 return -1;
6029
6030 if (offset != (CORE_ADDR) 0
6031 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6032 n = -1;
6033 else
6034 n = read (fd, myaddr, len);
6035
6036 close (fd);
6037
6038 return n;
6039}
6040
6041/* These breakpoint and watchpoint related wrapper functions simply
6042 pass on the function call if the target has registered a
6043 corresponding function. */
6044
6045static int
6046linux_supports_z_point_type (char z_type)
6047{
6048 return (the_low_target.supports_z_point_type != NULL
6049 && the_low_target.supports_z_point_type (z_type));
6050}
6051
6052static int
6053linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6054 int size, struct raw_breakpoint *bp)
6055{
6056 if (type == raw_bkpt_type_sw)
6057 return insert_memory_breakpoint (bp);
6058 else if (the_low_target.insert_point != NULL)
6059 return the_low_target.insert_point (type, addr, size, bp);
6060 else
6061 /* Unsupported (see target.h). */
6062 return 1;
6063}
6064
6065static int
6066linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6067 int size, struct raw_breakpoint *bp)
6068{
6069 if (type == raw_bkpt_type_sw)
6070 return remove_memory_breakpoint (bp);
6071 else if (the_low_target.remove_point != NULL)
6072 return the_low_target.remove_point (type, addr, size, bp);
6073 else
6074 /* Unsupported (see target.h). */
6075 return 1;
6076}
6077
6078/* Implement the to_stopped_by_sw_breakpoint target_ops
6079 method. */
6080
6081static int
6082linux_stopped_by_sw_breakpoint (void)
6083{
6084 struct lwp_info *lwp = get_thread_lwp (current_thread);
6085
6086 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6087}
6088
6089/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6090 method. */
6091
6092static int
6093linux_supports_stopped_by_sw_breakpoint (void)
6094{
6095 return USE_SIGTRAP_SIGINFO;
6096}
6097
6098/* Implement the to_stopped_by_hw_breakpoint target_ops
6099 method. */
6100
6101static int
6102linux_stopped_by_hw_breakpoint (void)
6103{
6104 struct lwp_info *lwp = get_thread_lwp (current_thread);
6105
6106 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6107}
6108
6109/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6110 method. */
6111
6112static int
6113linux_supports_stopped_by_hw_breakpoint (void)
6114{
6115 return USE_SIGTRAP_SIGINFO;
6116}
6117
6118/* Implement the supports_hardware_single_step target_ops method. */
6119
6120static int
6121linux_supports_hardware_single_step (void)
6122{
6123 return can_hardware_single_step ();
6124}
6125
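/* Implement the supports_software_single_step target_ops method.  */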
6126static int
6127linux_supports_software_single_step (void)
6128{
6129 return can_software_single_step ();
6130}
6131
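/* Implement the to_stopped_by_watchpoint target_ops method.  */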
6132static int
6133linux_stopped_by_watchpoint (void)
6134{
6135 struct lwp_info *lwp = get_thread_lwp (current_thread);
6136
6137 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6138}
6139
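/* Implement the to_stopped_data_address target_ops method.  */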
6140static CORE_ADDR
6141linux_stopped_data_address (void)
6142{
6143 struct lwp_info *lwp = get_thread_lwp (current_thread);
6144
6145 return lwp->stopped_data_address;
6146}
6147
6148#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6149 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6150 && defined(PT_TEXT_END_ADDR)
6151
6152/* This is only used for targets that define PT_TEXT_ADDR,
6153 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6154 the target has different ways of acquiring this information, like
6155 loadmaps. */
6156
6157/* Under uClinux, programs are loaded at non-zero offsets, which we need
6158 to tell gdb about. */
6159
6160static int
6161linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6162{
6163 unsigned long text, text_end, data;
6164 int pid = lwpid_of (current_thread);
6165
6166 errno = 0;
6167
6168 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6169 (PTRACE_TYPE_ARG4) 0);
6170 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6171 (PTRACE_TYPE_ARG4) 0);
6172 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6173 (PTRACE_TYPE_ARG4) 0);
6174
6175 if (errno == 0)
6176 {
6177 /* Both text and data offsets produced at compile-time (and so
6178 used by gdb) are relative to the beginning of the program,
6179 with the data segment immediately following the text segment.
6180 However, the actual runtime layout in memory may put the data
6181 somewhere else, so when we send gdb a data base-address, we
6182 use the real data base address and subtract the compile-time
6183 data base-address from it (which is just the length of the
6184 text segment). BSS immediately follows data in both
6185 cases. */
6186 *text_p = text;
6187 *data_p = data - (text_end - text);
6188
6189 return 1;
6190 }
6191 return 0;
6192}
6193#endif
6194
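/* Implement qXfer:osdata reads by delegating to the common Linux
   osdata code; writing is not supported.  */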
6195static int
6196linux_qxfer_osdata (const char *annex,
6197 unsigned char *readbuf, unsigned const char *writebuf,
6198 CORE_ADDR offset, int len)
6199{
6200 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6201}
6202
6203/* Convert a native/host siginfo object, into/from the siginfo in the
6204 layout of the inferior's architecture.  */
6205
6206static void
6207siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6208{
6209 int done = 0;
6210
6211 if (the_low_target.siginfo_fixup != NULL)
6212 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6213
6214 /* If there was no callback, or the callback didn't do anything,
6215 then just do a straight memcpy. */
6216 if (!done)
6217 {
6218 if (direction == 1)
6219 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6220 else
6221 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6222 }
6223}
6224
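/* Transfer LEN bytes at OFFSET of the current LWP's siginfo, reading
   into READBUF or writing from WRITEBUF, converting between the
   inferior's siginfo layout and ptrace's as needed.  Return the number
   of bytes transferred, or -1 on error.  */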
6225static int
6226linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6227 unsigned const char *writebuf, CORE_ADDR offset, int len)
6228{
6229 int pid;
6230 siginfo_t siginfo;
6231 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6232
6233 if (current_thread == NULL)
6234 return -1;
6235
6236 pid = lwpid_of (current_thread);
6237
6238 if (debug_threads)
6239 debug_printf ("%s siginfo for lwp %d.\n",
6240 readbuf != NULL ? "Reading" : "Writing",
6241 pid);
6242
6243 if (offset >= sizeof (siginfo))
6244 return -1;
6245
6246 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6247 return -1;
6248
6249 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6250 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6251 inferior with a 64-bit GDBSERVER should look the same as debugging it
6252 with a 32-bit GDBSERVER, we need to convert it. */
6253 siginfo_fixup (&siginfo, inf_siginfo, 0);
6254
6255 if (offset + len > sizeof (siginfo))
6256 len = sizeof (siginfo) - offset;
6257
6258 if (readbuf != NULL)
6259 memcpy (readbuf, inf_siginfo + offset, len);
6260 else
6261 {
6262 memcpy (inf_siginfo + offset, writebuf, len);
6263
6264 /* Convert back to ptrace layout before flushing it out. */
6265 siginfo_fixup (&siginfo, inf_siginfo, 1);
6266
6267 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6268 return -1;
6269 }
6270
6271 return len;
6272}
6273
6274/* SIGCHLD handler that serves two purposes: in non-stop/async mode
6275 it lets us notice when children change state, and it acts as the
6276 handler for the sigsuspend in my_waitpid.  */
6277
6278static void
6279sigchld_handler (int signo)
6280{
6281 int old_errno = errno;
6282
6283 if (debug_threads)
6284 {
6285 do
6286 {
6287 /* fprintf is not async-signal-safe, so call write
6288 directly. */
6289 if (write (2, "sigchld_handler\n",
6290 sizeof ("sigchld_handler\n") - 1) < 0)
6291 break; /* just ignore */
6292 } while (0);
6293 }
6294
6295 if (target_is_async_p ())
6296 async_file_mark (); /* trigger a linux_wait */
6297
6298 errno = old_errno;
6299}
6300
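/* Check if non-stop mode is supported.  Always true for Linux.  */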
6301static int
6302linux_supports_non_stop (void)
6303{
6304 return 1;
6305}
6306
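/* Enable or disable async (non-stop) mode: set up or tear down the
   SIGCHLD event pipe and its event-loop handler.  Return the previous
   setting.  */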
6307static int
6308linux_async (int enable)
6309{
6310 int previous = target_is_async_p ();
6311
6312 if (debug_threads)
6313 debug_printf ("linux_async (%d), previous=%d\n",
6314 enable, previous);
6315
6316 if (previous != enable)
6317 {
6318 sigset_t mask;
6319 sigemptyset (&mask);
6320 sigaddset (&mask, SIGCHLD);
6321
6322 sigprocmask (SIG_BLOCK, &mask, NULL);
6323
6324 if (enable)
6325 {
6326 if (pipe (linux_event_pipe) == -1)
6327 {
6328 linux_event_pipe[0] = -1;
6329 linux_event_pipe[1] = -1;
6330 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6331
6332 warning ("creating event pipe failed.");
6333 return previous;
6334 }
6335
6336 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6337 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6338
6339 /* Register the event loop handler. */
6340 add_file_handler (linux_event_pipe[0],
6341 handle_target_event, NULL);
6342
6343 /* Always trigger a linux_wait. */
6344 async_file_mark ();
6345 }
6346 else
6347 {
6348 delete_file_handler (linux_event_pipe[0]);
6349
6350 close (linux_event_pipe[0]);
6351 close (linux_event_pipe[1]);
6352 linux_event_pipe[0] = -1;
6353 linux_event_pipe[1] = -1;
6354 }
6355
6356 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6357 }
6358
6359 return previous;
6360}
6361
6362static int
6363linux_start_non_stop (int nonstop)
6364{
6365 /* Register or unregister from event-loop accordingly. */
6366 linux_async (nonstop);
6367
6368 if (target_is_async_p () != (nonstop != 0))
6369 return -1;
6370
6371 return 0;
6372}
6373
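/* Check if multi-process debugging is supported.  Always true for
   Linux.  */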
6374static int
6375linux_supports_multi_process (void)
6376{
6377 return 1;
6378}
6379
6380/* Check if fork events are supported. */
6381
6382static int
6383linux_supports_fork_events (void)
6384{
6385 return linux_supports_tracefork ();
6386}
6387
6388/* Check if vfork events are supported. */
6389
6390static int
6391linux_supports_vfork_events (void)
6392{
6393 return linux_supports_tracefork ();
6394}
6395
6396/* Check if exec events are supported. */
6397
6398static int
6399linux_supports_exec_events (void)
6400{
6401 return linux_supports_traceexec ();
6402}
6403
6404/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6405 ptrace flags for all inferiors. This is in case the new GDB connection
6406 doesn't support the same set of events that the previous one did. */
6407
6408static void
6409linux_handle_new_gdb_connection (void)
6410{
6411 /* Request that all the lwps reset their ptrace options. */
6412 for_each_thread ([] (thread_info *thread)
6413 {
6414 struct lwp_info *lwp = get_thread_lwp (thread);
6415
6416 if (!lwp->stopped)
6417 {
6418 /* Stop the lwp so we can modify its ptrace options. */
6419 lwp->must_set_ptrace_flags = 1;
6420 linux_stop_lwp (lwp);
6421 }
6422 else
6423 {
6424 /* Already stopped; go ahead and set the ptrace options. */
6425 struct process_info *proc = find_process_pid (pid_of (thread));
6426 int options = linux_low_ptrace_options (proc->attached);
6427
6428 linux_enable_event_reporting (lwpid_of (thread), options);
6429 lwp->must_set_ptrace_flags = 0;
6430 }
6431 });
6432}
6433
6434static int
6435linux_supports_disable_randomization (void)
6436{
6437#ifdef HAVE_PERSONALITY
6438 return 1;
6439#else
6440 return 0;
6441#endif
6442}
6443
6444static int
6445linux_supports_agent (void)
6446{
6447 return 1;
6448}
6449
6450static int
6451linux_supports_range_stepping (void)
6452{
6453 if (can_software_single_step ())
6454 return 1;
6455 if (*the_low_target.supports_range_stepping == NULL)
6456 return 0;
6457
6458 return (*the_low_target.supports_range_stepping) ();
6459}
6460
6461/* Enumerate spufs IDs for process PID. */
6462static int
6463spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6464{
6465 int pos = 0;
6466 int written = 0;
6467 char path[128];
6468 DIR *dir;
6469 struct dirent *entry;
6470
6471 sprintf (path, "/proc/%ld/fd", pid);
6472 dir = opendir (path);
6473 if (!dir)
6474 return -1;
6475
6476 rewinddir (dir);
6477 while ((entry = readdir (dir)) != NULL)
6478 {
6479 struct stat st;
6480 struct statfs stfs;
6481 int fd;
6482
6483 fd = atoi (entry->d_name);
6484 if (!fd)
6485 continue;
6486
6487 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6488 if (stat (path, &st) != 0)
6489 continue;
6490 if (!S_ISDIR (st.st_mode))
6491 continue;
6492
6493 if (statfs (path, &stfs) != 0)
6494 continue;
6495 if (stfs.f_type != SPUFS_MAGIC)
6496 continue;
6497
6498 if (pos >= offset && pos + 4 <= offset + len)
6499 {
6500 *(unsigned int *)(buf + pos - offset) = fd;
6501 written += 4;
6502 }
6503 pos += 4;
6504 }
6505
6506 closedir (dir);
6507 return written;
6508}
6509
6510/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6511 object type, using the /proc file system. */
6512static int
6513linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6514 unsigned const char *writebuf,
6515 CORE_ADDR offset, int len)
6516{
6517 long pid = lwpid_of (current_thread);
6518 char buf[128];
6519 int fd = 0;
6520 int ret = 0;
6521
6522 if (!writebuf && !readbuf)
6523 return -1;
6524
6525 if (!*annex)
6526 {
6527 if (!readbuf)
6528 return -1;
6529 else
6530 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6531 }
6532
6533 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6534 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6535 if (fd <= 0)
6536 return -1;
6537
6538 if (offset != 0
6539 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6540 {
6541 close (fd);
6542 return 0;
6543 }
6544
6545 if (writebuf)
6546 ret = write (fd, writebuf, (size_t) len);
6547 else
6548 ret = read (fd, readbuf, (size_t) len);
6549
6550 close (fd);
6551 return ret;
6552}
6553
6554#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6555struct target_loadseg
6556{
6557 /* Core address to which the segment is mapped. */
6558 Elf32_Addr addr;
6559 /* VMA recorded in the program header. */
6560 Elf32_Addr p_vaddr;
6561 /* Size of this segment in memory. */
6562 Elf32_Word p_memsz;
6563};
6564
6565# if defined PT_GETDSBT
6566struct target_loadmap
6567{
6568 /* Protocol version number, must be zero. */
6569 Elf32_Word version;
6570 /* Pointer to the DSBT table, its size, and the DSBT index. */
6571 unsigned *dsbt_table;
6572 unsigned dsbt_size, dsbt_index;
6573 /* Number of segments in this map. */
6574 Elf32_Word nsegs;
6575 /* The actual memory map. */
6576 struct target_loadseg segs[/*nsegs*/];
6577};
6578# define LINUX_LOADMAP PT_GETDSBT
6579# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6580# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6581# else
6582struct target_loadmap
6583{
6584 /* Protocol version number, must be zero. */
6585 Elf32_Half version;
6586 /* Number of segments in this map. */
6587 Elf32_Half nsegs;
6588 /* The actual memory map. */
6589 struct target_loadseg segs[/*nsegs*/];
6590};
6591# define LINUX_LOADMAP PTRACE_GETFDPIC
6592# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6593# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6594# endif
6595
6596static int
6597linux_read_loadmap (const char *annex, CORE_ADDR offset,
6598 unsigned char *myaddr, unsigned int len)
6599{
6600 int pid = lwpid_of (current_thread);
6601 int addr = -1;
6602 struct target_loadmap *data = NULL;
6603 unsigned int actual_length, copy_length;
6604
6605 if (strcmp (annex, "exec") == 0)
6606 addr = (int) LINUX_LOADMAP_EXEC;
6607 else if (strcmp (annex, "interp") == 0)
6608 addr = (int) LINUX_LOADMAP_INTERP;
6609 else
6610 return -1;
6611
6612 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6613 return -1;
6614
6615 if (data == NULL)
6616 return -1;
6617
6618 actual_length = sizeof (struct target_loadmap)
6619 + sizeof (struct target_loadseg) * data->nsegs;
6620
6621 if (offset < 0 || offset > actual_length)
6622 return -1;
6623
6624 copy_length = actual_length - offset < len ? actual_length - offset : len;
6625 memcpy (myaddr, (char *) data + offset, copy_length);
6626 return copy_length;
6627}
6628#else
6629# define linux_read_loadmap NULL
6630#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6631
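/* Pass the qSupported feature list on to the low target, if it is
   interested.  */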
6632static void
6633linux_process_qsupported (char **features, int count)
6634{
6635 if (the_low_target.process_qsupported != NULL)
6636 the_low_target.process_qsupported (features, count);
6637}
6638
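/* Syscall catchpoints require the low target to decode syscall trap
   info and the kernel to support PTRACE_O_TRACESYSGOOD.  */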
6639static int
6640linux_supports_catch_syscall (void)
6641{
6642 return (the_low_target.get_syscall_trapinfo != NULL
6643 && linux_supports_tracesysgood ());
6644}
6645
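/* Return the in-process agent's target description index, or 0 if the
   low target does not provide one.  */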
6646static int
6647linux_get_ipa_tdesc_idx (void)
6648{
6649 if (the_low_target.get_ipa_tdesc_idx == NULL)
6650 return 0;
6651
6652 return (*the_low_target.get_ipa_tdesc_idx) ();
6653}
6654
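/* Check if tracepoints are supported by the low target.  */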
6655static int
6656linux_supports_tracepoints (void)
6657{
6658 if (*the_low_target.supports_tracepoints == NULL)
6659 return 0;
6660
6661 return (*the_low_target.supports_tracepoints) ();
6662}
6663
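/* Read the current PC from REGCACHE via the low target's get_pc hook,
   or return 0 if there is none.  */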
6664static CORE_ADDR
6665linux_read_pc (struct regcache *regcache)
6666{
6667 if (the_low_target.get_pc == NULL)
6668 return 0;
6669
6670 return (*the_low_target.get_pc) (regcache);
6671}
6672
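/* Write PC into REGCACHE; the low target must provide set_pc.  */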
6673static void
6674linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6675{
6676 gdb_assert (the_low_target.set_pc != NULL);
6677
6678 (*the_low_target.set_pc) (regcache, pc);
6679}
6680
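/* Return non-zero if THREAD's LWP is known to be stopped.  */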
6681static int
6682linux_thread_stopped (struct thread_info *thread)
6683{
6684 return get_thread_lwp (thread)->stopped;
6685}
6686
6687/* This exposes stop-all-threads functionality to other modules. */
6688
6689static void
6690linux_pause_all (int freeze)
6691{
6692 stop_all_lwps (freeze, NULL);
6693}
6694
6695/* This exposes unstop-all-threads functionality to other gdbserver
6696 modules. */
6697
6698static void
6699linux_unpause_all (int unfreeze)
6700{
6701 unstop_all_lwps (unfreeze, NULL);
6702}
6703
6704static int
6705linux_prepare_to_access_memory (void)
6706{
6707 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6708 running LWP. */
6709 if (non_stop)
6710 linux_pause_all (1);
6711 return 0;
6712}
6713
6714static void
6715linux_done_accessing_memory (void)
6716{
6717 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6718 running LWP. */
6719 if (non_stop)
6720 linux_unpause_all (1);
6721}
6722
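/* Pass a fast tracepoint jump pad request straight through to the low
   target.  */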
6723static int
6724linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6725 CORE_ADDR collector,
6726 CORE_ADDR lockaddr,
6727 ULONGEST orig_size,
6728 CORE_ADDR *jump_entry,
6729 CORE_ADDR *trampoline,
6730 ULONGEST *trampoline_size,
6731 unsigned char *jjump_pad_insn,
6732 ULONGEST *jjump_pad_insn_size,
6733 CORE_ADDR *adjusted_insn_addr,
6734 CORE_ADDR *adjusted_insn_addr_end,
6735 char *err)
6736{
6737 return (*the_low_target.install_fast_tracepoint_jump_pad)
6738 (tpoint, tpaddr, collector, lockaddr, orig_size,
6739 jump_entry, trampoline, trampoline_size,
6740 jjump_pad_insn, jjump_pad_insn_size,
6741 adjusted_insn_addr, adjusted_insn_addr_end,
6742 err);
6743}
6744
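/* Return the low target's bytecode compilation hooks, if any.  */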
6745static struct emit_ops *
6746linux_emit_ops (void)
6747{
6748 if (the_low_target.emit_ops != NULL)
6749 return (*the_low_target.emit_ops) ();
6750 else
6751 return NULL;
6752}
6753
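/* Return the minimum length of an instruction that can be overwritten
   with a fast tracepoint jump.  */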
6754static int
6755linux_get_min_fast_tracepoint_insn_len (void)
6756{
6757 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6758}
6759
6760/* Extract *PHDR_MEMADDR and *NUM_PHDR from the inferior's auxv.  Return 0 on success.  */
6761
6762static int
6763get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6764 CORE_ADDR *phdr_memaddr, int *num_phdr)
6765{
6766 char filename[PATH_MAX];
6767 int fd;
6768 const int auxv_size = is_elf64
6769 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6770 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6771
6772 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6773
6774 fd = open (filename, O_RDONLY);
6775 if (fd < 0)
6776 return 1;
6777
6778 *phdr_memaddr = 0;
6779 *num_phdr = 0;
6780 while (read (fd, buf, auxv_size) == auxv_size
6781 && (*phdr_memaddr == 0 || *num_phdr == 0))
6782 {
6783 if (is_elf64)
6784 {
6785 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6786
6787 switch (aux->a_type)
6788 {
6789 case AT_PHDR:
6790 *phdr_memaddr = aux->a_un.a_val;
6791 break;
6792 case AT_PHNUM:
6793 *num_phdr = aux->a_un.a_val;
6794 break;
6795 }
6796 }
6797 else
6798 {
6799 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6800
6801 switch (aux->a_type)
6802 {
6803 case AT_PHDR:
6804 *phdr_memaddr = aux->a_un.a_val;
6805 break;
6806 case AT_PHNUM:
6807 *num_phdr = aux->a_un.a_val;
6808 break;
6809 }
6810 }
6811 }
6812
6813 close (fd);
6814
6815 if (*phdr_memaddr == 0 || *num_phdr == 0)
6816 {
6817 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6818 "phdr_memaddr = %ld, phdr_num = %d",
6819 (long) *phdr_memaddr, *num_phdr);
6820 return 2;
6821 }
6822
6823 return 0;
6824}
6825
6826/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6827
6828static CORE_ADDR
6829get_dynamic (const int pid, const int is_elf64)
6830{
6831 CORE_ADDR phdr_memaddr, relocation;
6832 int num_phdr, i;
6833 unsigned char *phdr_buf;
6834 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6835
6836 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6837 return 0;
6838
6839 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6840 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6841
6842 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6843 return 0;
6844
6845 /* Compute relocation: it is expected to be 0 for "regular" executables,
6846 non-zero for PIE ones. */
6847 relocation = -1;
6848 for (i = 0; relocation == -1 && i < num_phdr; i++)
6849 if (is_elf64)
6850 {
6851 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6852
6853 if (p->p_type == PT_PHDR)
6854 relocation = phdr_memaddr - p->p_vaddr;
6855 }
6856 else
6857 {
6858 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6859
6860 if (p->p_type == PT_PHDR)
6861 relocation = phdr_memaddr - p->p_vaddr;
6862 }
6863
6864 if (relocation == -1)
6865 {
6866 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6867 all real-world executables, including PIE executables, always have
6868 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6869 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6870 provides DT_DEBUG anyway (fpc binaries are statically linked).
6871
6872 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
6873
6874 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6875
6876 return 0;
6877 }
6878
6879 for (i = 0; i < num_phdr; i++)
6880 {
6881 if (is_elf64)
6882 {
6883 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6884
6885 if (p->p_type == PT_DYNAMIC)
6886 return p->p_vaddr + relocation;
6887 }
6888 else
6889 {
6890 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6891
6892 if (p->p_type == PT_DYNAMIC)
6893 return p->p_vaddr + relocation;
6894 }
6895 }
6896
6897 return 0;
6898}
6899
6900/* Return &_r_debug in the inferior, or -1 if not present. Return value
6901 can be 0 if the inferior does not yet have the library list initialized.
6902 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6903 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6904
6905static CORE_ADDR
6906get_r_debug (const int pid, const int is_elf64)
6907{
6908 CORE_ADDR dynamic_memaddr;
6909 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6910 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6911 CORE_ADDR map = -1;
6912
6913 dynamic_memaddr = get_dynamic (pid, is_elf64);
6914 if (dynamic_memaddr == 0)
6915 return map;
6916
6917 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6918 {
6919 if (is_elf64)
6920 {
6921 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6922#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6923 union
6924 {
6925 Elf64_Xword map;
6926 unsigned char buf[sizeof (Elf64_Xword)];
6927 }
6928 rld_map;
6929#endif
6930#ifdef DT_MIPS_RLD_MAP
6931 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6932 {
6933 if (linux_read_memory (dyn->d_un.d_val,
6934 rld_map.buf, sizeof (rld_map.buf)) == 0)
6935 return rld_map.map;
6936 else
6937 break;
6938 }
6939#endif /* DT_MIPS_RLD_MAP */
6940#ifdef DT_MIPS_RLD_MAP_REL
6941 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6942 {
6943 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6944 rld_map.buf, sizeof (rld_map.buf)) == 0)
6945 return rld_map.map;
6946 else
6947 break;
6948 }
6949#endif /* DT_MIPS_RLD_MAP_REL */
6950
6951 if (dyn->d_tag == DT_DEBUG && map == -1)
6952 map = dyn->d_un.d_val;
6953
6954 if (dyn->d_tag == DT_NULL)
6955 break;
6956 }
6957 else
6958 {
6959 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6960#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6961 union
6962 {
6963 Elf32_Word map;
6964 unsigned char buf[sizeof (Elf32_Word)];
6965 }
6966 rld_map;
6967#endif
6968#ifdef DT_MIPS_RLD_MAP
6969 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6970 {
6971 if (linux_read_memory (dyn->d_un.d_val,
6972 rld_map.buf, sizeof (rld_map.buf)) == 0)
6973 return rld_map.map;
6974 else
6975 break;
6976 }
6977#endif /* DT_MIPS_RLD_MAP */
6978#ifdef DT_MIPS_RLD_MAP_REL
6979 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6980 {
6981 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6982 rld_map.buf, sizeof (rld_map.buf)) == 0)
6983 return rld_map.map;
6984 else
6985 break;
6986 }
6987#endif /* DT_MIPS_RLD_MAP_REL */
6988
6989 if (dyn->d_tag == DT_DEBUG && map == -1)
6990 map = dyn->d_un.d_val;
6991
6992 if (dyn->d_tag == DT_NULL)
6993 break;
6994 }
6995
6996 dynamic_memaddr += dyn_size;
6997 }
6998
6999 return map;
7000}
7001
7002/* Read one pointer from MEMADDR in the inferior. */
7003
7004static int
7005read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
7006{
7007 int ret;
7008
7009 /* Go through a union so this works on either big or little endian
7010 hosts, when the inferior's pointer size is smaller than the size
7011 of CORE_ADDR. It is assumed the inferior's endianness is the
7012 same as the superior's.  */
7013 union
7014 {
7015 CORE_ADDR core_addr;
7016 unsigned int ui;
7017 unsigned char uc;
7018 } addr;
7019
7020 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
7021 if (ret == 0)
7022 {
7023 if (ptr_size == sizeof (CORE_ADDR))
7024 *ptr = addr.core_addr;
7025 else if (ptr_size == sizeof (unsigned int))
7026 *ptr = addr.ui;
7027 else
7028 gdb_assert_not_reached ("unhandled pointer size");
7029 }
7030 return ret;
7031}
7032
7033struct link_map_offsets
7034 {
7035 /* Offset of r_debug.r_version.  */
7036 int r_version_offset;
7037
7038 /* Offset of r_debug.r_map.  */
7039 int r_map_offset;
7040
7041 /* Offset to l_addr field in struct link_map. */
7042 int l_addr_offset;
7043
7044 /* Offset to l_name field in struct link_map. */
7045 int l_name_offset;
7046
7047 /* Offset to l_ld field in struct link_map. */
7048 int l_ld_offset;
7049
7050 /* Offset to l_next field in struct link_map. */
7051 int l_next_offset;
7052
7053 /* Offset to l_prev field in struct link_map. */
7054 int l_prev_offset;
7055 };
7056
7057/* Construct qXfer:libraries-svr4:read reply. */
7058
7059static int
7060linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7061 unsigned const char *writebuf,
7062 CORE_ADDR offset, int len)
7063{
7064 char *document;
7065 unsigned document_len;
7066 struct process_info_private *const priv = current_process ()->priv;
7067 char filename[PATH_MAX];
7068 int pid, is_elf64;
7069
7070 static const struct link_map_offsets lmo_32bit_offsets =
7071 {
7072 0, /* r_version offset. */
7073 4, /* r_debug.r_map offset. */
7074 0, /* l_addr offset in link_map. */
7075 4, /* l_name offset in link_map. */
7076 8, /* l_ld offset in link_map. */
7077 12, /* l_next offset in link_map. */
7078 16 /* l_prev offset in link_map. */
7079 };
7080
7081 static const struct link_map_offsets lmo_64bit_offsets =
7082 {
7083 0, /* r_version offset. */
7084 8, /* r_debug.r_map offset. */
7085 0, /* l_addr offset in link_map. */
7086 8, /* l_name offset in link_map. */
7087 16, /* l_ld offset in link_map. */
7088 24, /* l_next offset in link_map. */
7089 32 /* l_prev offset in link_map. */
7090 };
7091 const struct link_map_offsets *lmo;
7092 unsigned int machine;
7093 int ptr_size;
7094 CORE_ADDR lm_addr = 0, lm_prev = 0;
7095 int allocated = 1024;
7096 char *p;
7097 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7098 int header_done = 0;
7099
7100 if (writebuf != NULL)
7101 return -2;
7102 if (readbuf == NULL)
7103 return -1;
7104
7105 pid = lwpid_of (current_thread);
7106 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7107 is_elf64 = elf_64_file_p (filename, &machine);
7108 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7109 ptr_size = is_elf64 ? 8 : 4;
7110
7111 while (annex[0] != '\0')
7112 {
7113 const char *sep;
7114 CORE_ADDR *addrp;
7115 int len;
7116
7117 sep = strchr (annex, '=');
7118 if (sep == NULL)
7119 break;
7120
7121 len = sep - annex;
7122 if (len == 5 && startswith (annex, "start"))
7123 addrp = &lm_addr;
7124 else if (len == 4 && startswith (annex, "prev"))
7125 addrp = &lm_prev;
7126 else
7127 {
7128 annex = strchr (sep, ';');
7129 if (annex == NULL)
7130 break;
7131 annex++;
7132 continue;
7133 }
7134
7135 annex = decode_address_to_semicolon (addrp, sep + 1);
7136 }
7137
7138 if (lm_addr == 0)
7139 {
7140 int r_version = 0;
7141
7142 if (priv->r_debug == 0)
7143 priv->r_debug = get_r_debug (pid, is_elf64);
7144
7145 /* We failed to find DT_DEBUG.  This situation will not change
7146 for this inferior, so do not retry.  Report it to GDB as E01;
7147 see solib-svr4.c on the GDB side for the reasons.  */
7148 if (priv->r_debug == (CORE_ADDR) -1)
7149 return -1;
7150
7151 if (priv->r_debug != 0)
7152 {
7153 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7154 (unsigned char *) &r_version,
7155 sizeof (r_version)) != 0
7156 || r_version != 1)
7157 {
7158 warning ("unexpected r_debug version %d", r_version);
7159 }
7160 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7161 &lm_addr, ptr_size) != 0)
7162 {
7163 warning ("unable to read r_map from 0x%lx",
7164 (long) priv->r_debug + lmo->r_map_offset);
7165 }
7166 }
7167 }
7168
7169 document = (char *) xmalloc (allocated);
7170 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7171 p = document + strlen (document);
7172
7173 while (lm_addr
7174 && read_one_ptr (lm_addr + lmo->l_name_offset,
7175 &l_name, ptr_size) == 0
7176 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7177 &l_addr, ptr_size) == 0
7178 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7179 &l_ld, ptr_size) == 0
7180 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7181 &l_prev, ptr_size) == 0
7182 && read_one_ptr (lm_addr + lmo->l_next_offset,
7183 &l_next, ptr_size) == 0)
7184 {
7185 unsigned char libname[PATH_MAX];
7186
7187 if (lm_prev != l_prev)
7188 {
7189 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7190 (long) lm_prev, (long) l_prev);
7191 break;
7192 }
7193
7194 /* Ignore the first entry even if it has a valid name, as the first
7195 entry corresponds to the main executable.  The first entry should
7196 not be skipped if the dynamic loader was loaded late by a static
7197 executable (see the solib-svr4.c parameter ignore_first), but in
7198 that case the main executable does not have PT_DYNAMIC present and
7199 this function has already exited above due to a failed get_r_debug.  */
7200 if (lm_prev == 0)
7201 {
7202 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7203 p = p + strlen (p);
7204 }
7205 else
7206 {
7207 /* Not checking for error because reading may stop before
7208 we've got PATH_MAX worth of characters. */
7209 libname[0] = '\0';
7210 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7211 libname[sizeof (libname) - 1] = '\0';
7212 if (libname[0] != '\0')
7213 {
7214 /* 6x the size for xml_escape_text below. */
7215 size_t len = 6 * strlen ((char *) libname);
7216
7217 if (!header_done)
7218 {
7219 /* Terminate `<library-list-svr4'. */
7220 *p++ = '>';
7221 header_done = 1;
7222 }
7223
7224 while (allocated < p - document + len + 200)
7225 {
7226 /* Expand to guarantee sufficient storage. */
7227 uintptr_t document_len = p - document;
7228
7229 document = (char *) xrealloc (document, 2 * allocated);
7230 allocated *= 2;
7231 p = document + document_len;
7232 }
7233
7234 std::string name = xml_escape_text ((char *) libname);
7235 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7236 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7237 name.c_str (), (unsigned long) lm_addr,
7238 (unsigned long) l_addr, (unsigned long) l_ld);
7239 }
7240 }
7241
7242 lm_prev = lm_addr;
7243 lm_addr = l_next;
7244 }
7245
7246 if (!header_done)
7247 {
7248 /* Empty list; terminate `<library-list-svr4'. */
7249 strcpy (p, "/>");
7250 }
7251 else
7252 strcpy (p, "</library-list-svr4>");
7253
7254 document_len = strlen (document);
7255 if (offset < document_len)
7256 document_len -= offset;
7257 else
7258 document_len = 0;
7259 if (len > document_len)
7260 len = document_len;
7261
7262 memcpy (readbuf, document + offset, len);
7263 xfree (document);
7264
7265 return len;
7266}
7267
7268#ifdef HAVE_LINUX_BTRACE
7269
7270/* See to_disable_btrace target method. */
7271
7272static int
7273linux_low_disable_btrace (struct btrace_target_info *tinfo)
7274{
7275 enum btrace_error err;
7276
7277 err = linux_disable_btrace (tinfo);
7278 return (err == BTRACE_ERR_NONE ? 0 : -1);
7279}
7280
7281/* Encode an Intel Processor Trace configuration. */
7282
7283static void
7284linux_low_encode_pt_config (struct buffer *buffer,
7285 const struct btrace_data_pt_config *config)
7286{
7287 buffer_grow_str (buffer, "<pt-config>\n");
7288
7289 switch (config->cpu.vendor)
7290 {
7291 case CV_INTEL:
7292 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7293 "model=\"%u\" stepping=\"%u\"/>\n",
7294 config->cpu.family, config->cpu.model,
7295 config->cpu.stepping);
7296 break;
7297
7298 default:
7299 break;
7300 }
7301
7302 buffer_grow_str (buffer, "</pt-config>\n");
7303}
7304
7305/* Encode a raw buffer. */
7306
7307static void
7308linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7309 unsigned int size)
7310{
7311 if (size == 0)
7312 return;
7313
7314 /* We use hex encoding - see common/rsp-low.h. */
7315 buffer_grow_str (buffer, "<raw>\n");
7316
7317 while (size-- > 0)
7318 {
7319 char elem[2];
7320
7321 elem[0] = tohex ((*data >> 4) & 0xf);
7322 elem[1] = tohex (*data++ & 0xf);
7323
7324 buffer_grow (buffer, elem, 2);
7325 }
7326
7327 buffer_grow_str (buffer, "</raw>\n");
7328}
7329
7330/* See to_read_btrace target method. */
7331
7332static int
7333linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7334 enum btrace_read_type type)
7335{
7336 struct btrace_data btrace;
7337 struct btrace_block *block;
7338 enum btrace_error err;
7339 int i;
7340
7341 btrace_data_init (&btrace);
7342
7343 err = linux_read_btrace (&btrace, tinfo, type);
7344 if (err != BTRACE_ERR_NONE)
7345 {
7346 if (err == BTRACE_ERR_OVERFLOW)
7347 buffer_grow_str0 (buffer, "E.Overflow.");
7348 else
7349 buffer_grow_str0 (buffer, "E.Generic Error.");
7350
7351 goto err;
7352 }
7353
7354 switch (btrace.format)
7355 {
7356 case BTRACE_FORMAT_NONE:
7357 buffer_grow_str0 (buffer, "E.No Trace.");
7358 goto err;
7359
7360 case BTRACE_FORMAT_BTS:
7361 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7362 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7363
7364 for (i = 0;
7365 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7366 i++)
7367 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7368 paddress (block->begin), paddress (block->end));
7369
7370 buffer_grow_str0 (buffer, "</btrace>\n");
7371 break;
7372
7373 case BTRACE_FORMAT_PT:
7374 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7375 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7376 buffer_grow_str (buffer, "<pt>\n");
7377
7378 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7379
7380 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7381 btrace.variant.pt.size);
7382
7383 buffer_grow_str (buffer, "</pt>\n");
7384 buffer_grow_str0 (buffer, "</btrace>\n");
7385 break;
7386
7387 default:
7388 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7389 goto err;
7390 }
7391
7392 btrace_data_fini (&btrace);
7393 return 0;
7394
7395err:
7396 btrace_data_fini (&btrace);
7397 return -1;
7398}
7399
7400/* See to_btrace_conf target method. */
7401
7402static int
7403linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7404 struct buffer *buffer)
7405{
7406 const struct btrace_config *conf;
7407
7408 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7409 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7410
7411 conf = linux_btrace_conf (tinfo);
7412 if (conf != NULL)
7413 {
7414 switch (conf->format)
7415 {
7416 case BTRACE_FORMAT_NONE:
7417 break;
7418
7419 case BTRACE_FORMAT_BTS:
7420 buffer_xml_printf (buffer, "<bts");
7421 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7422 buffer_xml_printf (buffer, " />\n");
7423 break;
7424
7425 case BTRACE_FORMAT_PT:
7426 buffer_xml_printf (buffer, "<pt");
7427 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7428 buffer_xml_printf (buffer, "/>\n");
7429 break;
7430 }
7431 }
7432
7433 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7434 return 0;
7435}
7436#endif /* HAVE_LINUX_BTRACE */
7437
7438/* See nat/linux-nat.h. */
7439
7440ptid_t
7441current_lwp_ptid (void)
7442{
7443 return ptid_of (current_thread);
7444}
7445
7446/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7447
7448static int
7449linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7450{
7451 if (the_low_target.breakpoint_kind_from_pc != NULL)
7452 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7453 else
7454 return default_breakpoint_kind_from_pc (pcptr);
7455}
7456
7457/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7458
7459static const gdb_byte *
7460linux_sw_breakpoint_from_kind (int kind, int *size)
7461{
7462 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7463
7464 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7465}
7466
7467/* Implementation of the target_ops method
7468 "breakpoint_kind_from_current_state". */
7469
7470static int
7471linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7472{
7473 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7474 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7475 else
7476 return linux_breakpoint_kind_from_pc (pcptr);
7477}
7478
7479/* Default implementation of linux_target_ops method "set_pc" for
7480 32-bit pc register which is literally named "pc". */
7481
7482void
7483linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7484{
7485 uint32_t newpc = pc;
7486
7487 supply_register_by_name (regcache, "pc", &newpc);
7488}
7489
7490/* Default implementation of linux_target_ops method "get_pc" for
7491 32-bit pc register which is literally named "pc". */
7492
7493CORE_ADDR
7494linux_get_pc_32bit (struct regcache *regcache)
7495{
7496 uint32_t pc;
7497
7498 collect_register_by_name (regcache, "pc", &pc);
7499 if (debug_threads)
7500 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7501 return pc;
7502}
7503
7504/* Default implementation of linux_target_ops method "set_pc" for
7505 64-bit pc register which is literally named "pc". */
7506
7507void
7508linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7509{
7510 uint64_t newpc = pc;
7511
7512 supply_register_by_name (regcache, "pc", &newpc);
7513}
7514
7515/* Default implementation of linux_target_ops method "get_pc" for
7516 64-bit pc register which is literally named "pc". */
7517
7518CORE_ADDR
7519linux_get_pc_64bit (struct regcache *regcache)
7520{
7521 uint64_t pc;
7522
7523 collect_register_by_name (regcache, "pc", &pc);
7524 if (debug_threads)
7525 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7526 return pc;
7527}
7528
7529
7530static struct target_ops linux_target_ops = {
7531 linux_create_inferior,
7532 linux_post_create_inferior,
7533 linux_attach,
7534 linux_kill,
7535 linux_detach,
7536 linux_mourn,
7537 linux_join,
7538 linux_thread_alive,
7539 linux_resume,
7540 linux_wait,
7541 linux_fetch_registers,
7542 linux_store_registers,
7543 linux_prepare_to_access_memory,
7544 linux_done_accessing_memory,
7545 linux_read_memory,
7546 linux_write_memory,
7547 linux_look_up_symbols,
7548 linux_request_interrupt,
7549 linux_read_auxv,
7550 linux_supports_z_point_type,
7551 linux_insert_point,
7552 linux_remove_point,
7553 linux_stopped_by_sw_breakpoint,
7554 linux_supports_stopped_by_sw_breakpoint,
7555 linux_stopped_by_hw_breakpoint,
7556 linux_supports_stopped_by_hw_breakpoint,
7557 linux_supports_hardware_single_step,
7558 linux_stopped_by_watchpoint,
7559 linux_stopped_data_address,
7560#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7561 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7562 && defined(PT_TEXT_END_ADDR)
7563 linux_read_offsets,
7564#else
7565 NULL,
7566#endif
7567#ifdef USE_THREAD_DB
7568 thread_db_get_tls_address,
7569#else
7570 NULL,
7571#endif
7572 linux_qxfer_spu,
7573 hostio_last_error_from_errno,
7574 linux_qxfer_osdata,
7575 linux_xfer_siginfo,
7576 linux_supports_non_stop,
7577 linux_async,
7578 linux_start_non_stop,
7579 linux_supports_multi_process,
7580 linux_supports_fork_events,
7581 linux_supports_vfork_events,
7582 linux_supports_exec_events,
7583 linux_handle_new_gdb_connection,
7584#ifdef USE_THREAD_DB
7585 thread_db_handle_monitor_command,
7586#else
7587 NULL,
7588#endif
7589 linux_common_core_of_thread,
7590 linux_read_loadmap,
7591 linux_process_qsupported,
7592 linux_supports_tracepoints,
7593 linux_read_pc,
7594 linux_write_pc,
7595 linux_thread_stopped,
7596 NULL,
7597 linux_pause_all,
7598 linux_unpause_all,
7599 linux_stabilize_threads,
7600 linux_install_fast_tracepoint_jump_pad,
7601 linux_emit_ops,
7602 linux_supports_disable_randomization,
7603 linux_get_min_fast_tracepoint_insn_len,
7604 linux_qxfer_libraries_svr4,
7605 linux_supports_agent,
7606#ifdef HAVE_LINUX_BTRACE
7607 linux_supports_btrace,
7608 linux_enable_btrace,
7609 linux_low_disable_btrace,
7610 linux_low_read_btrace,
7611 linux_low_btrace_conf,
7612#else
7613 NULL,
7614 NULL,
7615 NULL,
7616 NULL,
7617 NULL,
7618#endif
7619 linux_supports_range_stepping,
7620 linux_proc_pid_to_exec_file,
7621 linux_mntns_open_cloexec,
7622 linux_mntns_unlink,
7623 linux_mntns_readlink,
7624 linux_breakpoint_kind_from_pc,
7625 linux_sw_breakpoint_from_kind,
7626 linux_proc_tid_get_name,
7627 linux_breakpoint_kind_from_current_state,
7628 linux_supports_software_single_step,
7629 linux_supports_catch_syscall,
7630 linux_get_ipa_tdesc_idx,
7631#if USE_THREAD_DB
7632 thread_db_thread_handle,
7633#else
7634 NULL,
7635#endif
7636};
7637
7638#ifdef HAVE_LINUX_REGSETS
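/* Count the regsets in INFO, whose list is terminated by an entry with
   a negative size, and record the total in num_regsets.  */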
7639void
7640initialize_regsets_info (struct regsets_info *info)
7641{
7642 for (info->num_regsets = 0;
7643 info->regsets[info->num_regsets].size >= 0;
7644 info->num_regsets++)
7645 ;
7646}
7647#endif
7648
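/* One-time initialization: install the Linux target vector, hook
   SIGCHLD, set up the architecture-specific pieces, and probe the
   kernel's ptrace features.  */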
7649void
7650initialize_low (void)
7651{
7652 struct sigaction sigchld_action;
7653
7654 memset (&sigchld_action, 0, sizeof (sigchld_action));
7655 set_target_ops (&linux_target_ops);
7656
7657 linux_ptrace_init_warnings ();
7658
7659 sigchld_action.sa_handler = sigchld_handler;
7660 sigemptyset (&sigchld_action.sa_mask);
7661 sigchld_action.sa_flags = SA_RESTART;
7662 sigaction (SIGCHLD, &sigchld_action, NULL);
7663
7664 initialize_low_arch ();
7665
7666 linux_check_ptrace_features ();
7667}