gdbserver/linux-low: turn 'prepare_to_resume' into a method
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 #ifndef AT_HWCAP2
75 #define AT_HWCAP2 26
76 #endif
77
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN has defined these since at least the 2.6.32 kernels. */
90 #elif defined(BFIN)
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
99 #endif
100 #endif
101
102 #if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107 #define SUPPORTS_READ_OFFSETS
108 #endif
109
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
113 #endif
114
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
117 typedef struct
118 {
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We used to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127 } Elf32_auxv_t;
128 #endif
129
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
132 typedef struct
133 {
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We used to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142 } Elf64_auxv_t;
143 #endif
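
/* For illustration: code that parses a raw /proc/PID/auxv buffer walks
   entries of the shapes above.  A minimal sketch, assuming BUF points
   at the 64-bit auxv data (hypothetical helper, unused here):  */
#if 0
static uint64_t
example_auxv_lookup (const Elf64_auxv_t *buf, uint64_t type)
{
  /* The vector ends with an AT_NULL terminator entry.  */
  for (const Elf64_auxv_t *av = buf; av->a_type != AT_NULL; av++)
    if (av->a_type == type)
      return av->a_un.a_val;
  return 0;
}
#endif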
144
145 /* Does the current host support PTRACE_GETREGSET? */
146 int have_ptrace_getregset = -1;
147
148 /* LWP accessors. */
149
150 /* See nat/linux-nat.h. */
151
152 ptid_t
153 ptid_of_lwp (struct lwp_info *lwp)
154 {
155 return ptid_of (get_lwp_thread (lwp));
156 }
157
158 /* See nat/linux-nat.h. */
159
160 void
161 lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163 {
164 lwp->arch_private = info;
165 }
166
167 /* See nat/linux-nat.h. */
168
169 struct arch_lwp_info *
170 lwp_arch_private_info (struct lwp_info *lwp)
171 {
172 return lwp->arch_private;
173 }
174
175 /* See nat/linux-nat.h. */
176
177 int
178 lwp_is_stopped (struct lwp_info *lwp)
179 {
180 return lwp->stopped;
181 }
182
183 /* See nat/linux-nat.h. */
184
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info *lwp)
187 {
188 return lwp->stop_reason;
189 }
190
191 /* See nat/linux-nat.h. */
192
193 int
194 lwp_is_stepping (struct lwp_info *lwp)
195 {
196 return lwp->stepping;
197 }
198
199 /* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
202
203 struct simple_pid_list
204 {
205 /* The process ID. */
206 int pid;
207
208 /* The status as reported by waitpid. */
209 int status;
210
211 /* Next in chain. */
212 struct simple_pid_list *next;
213 };
214 struct simple_pid_list *stopped_pids;
215
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219 static void
220 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221 {
222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228 }
229
230 static int
231 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232 {
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246 }
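
/* Usage sketch for the two helpers above (illustrative; the real use
   is in handle_extended_wait below).  When a fork/clone child's stop
   is seen before the parent's event, the wait loop stashes it; the
   event handler later claims it instead of calling waitpid again.  */
#if 0
  /* In the wait loop, on a stop from a pid we don't know yet:  */
  add_to_pid_list (&stopped_pids, new_pid, status);

  /* In the fork/clone event handler:  */
  int stashed_status;
  if (pull_pid_from_list (&stopped_pids, new_pid, &stashed_status))
    {
      /* The child's initial stop was already collected.  */
    }
#endif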
247
248 enum stopping_threads_kind
249 {
250 /* Not stopping threads presently. */
251 NOT_STOPPING_THREADS,
252
253 /* Stopping threads. */
254 STOPPING_THREADS,
255
256 /* Stopping and suspending threads. */
257 STOPPING_AND_SUSPENDING_THREADS
258 };
259
260 /* This is set while stop_all_lwps is in effect. */
261 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
262
263 /* FIXME make into a target method? */
264 int using_threads = 1;
265
266 /* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268 static int stabilizing_threads;
269
270 static void unsuspend_all_lwps (struct lwp_info *except);
271 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
272 static int lwp_is_marked_dead (struct lwp_info *lwp);
273 static int finish_step_over (struct lwp_info *lwp);
274 static int kill_lwp (unsigned long lwpid, int signo);
275 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
276 static int linux_low_ptrace_options (int attached);
277 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
278
279 /* When the event-loop is doing a step-over, this points at the thread
280 being stepped. */
281 ptid_t step_over_bkpt;
282
283 /* True if the low target can hardware single-step. */
284
285 static int
286 can_hardware_single_step (void)
287 {
288 if (the_low_target.supports_hardware_single_step != NULL)
289 return the_low_target.supports_hardware_single_step ();
290 else
291 return 0;
292 }
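
/* Illustration (hypothetical, not part of this file): an architecture
   backend that can always hardware single-step would provide the hook
   roughly like this, wiring it up in its struct linux_target_ops.  */
#if 0
static int
example_supports_hardware_single_step (void)
{
  return 1;
}
#endif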
293
294 bool
295 linux_process_target::low_supports_breakpoints ()
296 {
297 return false;
298 }
299
300 CORE_ADDR
301 linux_process_target::low_get_pc (regcache *regcache)
302 {
303 return 0;
304 }
305
306 void
307 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
308 {
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
310 }
311
312 std::vector<CORE_ADDR>
313 linux_process_target::low_get_next_pcs (regcache *regcache)
314 {
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
317 }
318
319 int
320 linux_process_target::low_decr_pc_after_break ()
321 {
322 return 0;
323 }
324
325 /* Returns true if this target can support fast tracepoints. This
326 does not mean that the in-process agent has been loaded in the
327 inferior. */
328
329 static int
330 supports_fast_tracepoints (void)
331 {
332 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
333 }
334
335 /* True if LWP is stopped in its stepping range. */
336
337 static int
338 lwp_in_step_range (struct lwp_info *lwp)
339 {
340 CORE_ADDR pc = lwp->stop_pc;
341
342 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
343 }
344
345 struct pending_signals
346 {
347 int signal;
348 siginfo_t info;
349 struct pending_signals *prev;
350 };
351
352 /* The read/write ends of the pipe registered as a waitable file in the
353 event loop. */
354 static int linux_event_pipe[2] = { -1, -1 };
355
356 /* True if we're currently in async mode. */
357 #define target_is_async_p() (linux_event_pipe[0] != -1)
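
/* A minimal sketch of the async-notification idea, for illustration
   only (the real helpers, async_file_mark and friends, appear further
   down in this file): an event is flagged by writing a byte to the
   pipe's write end, which wakes up the event loop polling the read
   end.  */
#if 0
  if (target_is_async_p ())
    write (linux_event_pipe[1], "+", 1);	/* cf. async_file_mark.  */
#endif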
358
359 static void send_sigstop (struct lwp_info *lwp);
360
361 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit, and -1 if it is not ELF at all; set *MACHINE to e_machine (EM_NONE if not ELF). */
362
363 static int
364 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
365 {
366 if (header->e_ident[EI_MAG0] == ELFMAG0
367 && header->e_ident[EI_MAG1] == ELFMAG1
368 && header->e_ident[EI_MAG2] == ELFMAG2
369 && header->e_ident[EI_MAG3] == ELFMAG3)
370 {
371 *machine = header->e_machine;
372 return header->e_ident[EI_CLASS] == ELFCLASS64;
373
374 }
375 *machine = EM_NONE;
376 return -1;
377 }
378
379 /* Return non-zero if FILE is a 64-bit ELF file,
380 zero if the file is not a 64-bit ELF file,
381 and -1 if the file is not accessible or doesn't exist. */
382
383 static int
384 elf_64_file_p (const char *file, unsigned int *machine)
385 {
386 Elf64_Ehdr header;
387 int fd;
388
389 fd = open (file, O_RDONLY);
390 if (fd < 0)
391 return -1;
392
393 if (read (fd, &header, sizeof (header)) != sizeof (header))
394 {
395 close (fd);
396 return 0;
397 }
398 close (fd);
399
400 return elf_64_header_p (&header, machine);
401 }
402
403 /* Accepts an integer PID; returns true if the executable that PID
404 is running is a 64-bit ELF file. */
405
406 int
407 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
408 {
409 char file[PATH_MAX];
410
411 sprintf (file, "/proc/%d/exe", pid);
412 return elf_64_file_p (file, machine);
413 }
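
/* Example (hypothetical): checking the bitness of the executable a
   just-attached inferior is running.  */
#if 0
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);
  if (is_64 < 0)
    warning ("could not inspect /proc/%d/exe", pid);
#endif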
414
415 void
416 linux_process_target::delete_lwp (lwp_info *lwp)
417 {
418 struct thread_info *thr = get_lwp_thread (lwp);
419
420 if (debug_threads)
421 debug_printf ("deleting %ld\n", lwpid_of (thr));
422
423 remove_thread (thr);
424
425 low_delete_thread (lwp->arch_private);
426
427 free (lwp);
428 }
429
430 void
431 linux_process_target::low_delete_thread (arch_lwp_info *info)
432 {
433 /* Default implementation should be overridden if architecture-specific
434 info is being used. */
435 gdb_assert (info == nullptr);
436 }
437
438 process_info *
439 linux_process_target::add_linux_process (int pid, int attached)
440 {
441 struct process_info *proc;
442
443 proc = add_process (pid, attached);
444 proc->priv = XCNEW (struct process_info_private);
445
446 proc->priv->arch_private = low_new_process ();
447
448 return proc;
449 }
450
451 arch_process_info *
452 linux_process_target::low_new_process ()
453 {
454 return nullptr;
455 }
456
457 void
458 linux_process_target::low_delete_process (arch_process_info *info)
459 {
460 /* Default implementation must be overridden if architecture-specific
461 info exists. */
462 gdb_assert (info == nullptr);
463 }
464
465 void
466 linux_process_target::low_new_fork (process_info *parent, process_info *child)
467 {
468 /* Nop. */
469 }
470
471 void
472 linux_process_target::arch_setup_thread (thread_info *thread)
473 {
474 struct thread_info *saved_thread;
475
476 saved_thread = current_thread;
477 current_thread = thread;
478
479 low_arch_setup ();
480
481 current_thread = saved_thread;
482 }
483
484 int
485 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
486 int wstat)
487 {
488 client_state &cs = get_client_state ();
489 struct lwp_info *event_lwp = *orig_event_lwp;
490 int event = linux_ptrace_get_extended_event (wstat);
491 struct thread_info *event_thr = get_lwp_thread (event_lwp);
492 struct lwp_info *new_lwp;
493
494 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
495
496 /* All extended events we currently use are mid-syscall. Only
497 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
498 you have to be using PTRACE_SEIZE to get that. */
499 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
500
501 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
502 || (event == PTRACE_EVENT_CLONE))
503 {
504 ptid_t ptid;
505 unsigned long new_pid;
506 int ret, status;
507
508 /* Get the pid of the new lwp. */
509 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
510 &new_pid);
511
512 /* If we haven't already seen the new PID stop, wait for it now. */
513 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
514 {
515 /* The new child has a pending SIGSTOP. We can't affect it until it
516 hits the SIGSTOP, but we're already attached. */
517
518 ret = my_waitpid (new_pid, &status, __WALL);
519
520 if (ret == -1)
521 perror_with_name ("waiting for new child");
522 else if (ret != new_pid)
523 warning ("wait returned unexpected PID %d", ret);
524 else if (!WIFSTOPPED (status))
525 warning ("wait returned unexpected status 0x%x", status);
526 }
527
528 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
529 {
530 struct process_info *parent_proc;
531 struct process_info *child_proc;
532 struct lwp_info *child_lwp;
533 struct thread_info *child_thr;
534 struct target_desc *tdesc;
535
536 ptid = ptid_t (new_pid, new_pid, 0);
537
538 if (debug_threads)
539 {
540 debug_printf ("HEW: Got fork event from LWP %ld, "
541 "new child is %d\n",
542 ptid_of (event_thr).lwp (),
543 ptid.pid ());
544 }
545
546 /* Add the new process to the tables and clone the breakpoint
547 lists of the parent. We need to do this even if the new process
548 will be detached, since we will need the process object and the
549 breakpoints to remove any breakpoints from memory when we
550 detach, and the client side will access registers. */
551 child_proc = add_linux_process (new_pid, 0);
552 gdb_assert (child_proc != NULL);
553 child_lwp = add_lwp (ptid);
554 gdb_assert (child_lwp != NULL);
555 child_lwp->stopped = 1;
556 child_lwp->must_set_ptrace_flags = 1;
557 child_lwp->status_pending_p = 0;
558 child_thr = get_lwp_thread (child_lwp);
559 child_thr->last_resume_kind = resume_stop;
560 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
561
562 /* If we're suspending all threads, leave this one suspended
563 too. If the fork/clone parent is stepping over a breakpoint,
564 all other threads have been suspended already. Leave the
565 child suspended too. */
566 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
567 || event_lwp->bp_reinsert != 0)
568 {
569 if (debug_threads)
570 debug_printf ("HEW: leaving child suspended\n");
571 child_lwp->suspended = 1;
572 }
573
574 parent_proc = get_thread_process (event_thr);
575 child_proc->attached = parent_proc->attached;
576
577 if (event_lwp->bp_reinsert != 0
578 && supports_software_single_step ()
579 && event == PTRACE_EVENT_VFORK)
580 {
581 /* If we leave single-step breakpoints there, child will
582 hit it, so uninsert single-step breakpoints from parent
583 (and child). Once vfork child is done, reinsert
584 them back to parent. */
585 uninsert_single_step_breakpoints (event_thr);
586 }
587
588 clone_all_breakpoints (child_thr, event_thr);
589
590 tdesc = allocate_target_description ();
591 copy_target_description (tdesc, parent_proc->tdesc);
592 child_proc->tdesc = tdesc;
593
594 /* Clone arch-specific process data. */
595 low_new_fork (parent_proc, child_proc);
596
597 /* Save fork info in the parent thread. */
598 if (event == PTRACE_EVENT_FORK)
599 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
600 else if (event == PTRACE_EVENT_VFORK)
601 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
602
603 event_lwp->waitstatus.value.related_pid = ptid;
604
605 /* The status_pending field contains bits denoting the
606 extended event, so when the pending event is handled,
607 the handler will look at lwp->waitstatus. */
608 event_lwp->status_pending_p = 1;
609 event_lwp->status_pending = wstat;
610
611 /* Link the threads until the parent event is passed on to
612 higher layers. */
613 event_lwp->fork_relative = child_lwp;
614 child_lwp->fork_relative = event_lwp;
615
616 /* If the parent thread is doing step-over with single-step
617 breakpoints, the list of single-step breakpoints are cloned
618 from the parent's. Remove them from the child process.
619 In case of vfork, we'll reinsert them back once vforked
620 child is done. */
621 if (event_lwp->bp_reinsert != 0
622 && supports_software_single_step ())
623 {
624 /* The child process is forked and stopped, so it is safe
625 to access its memory without stopping all other threads
626 from other processes. */
627 delete_single_step_breakpoints (child_thr);
628
629 gdb_assert (has_single_step_breakpoints (event_thr));
630 gdb_assert (!has_single_step_breakpoints (child_thr));
631 }
632
633 /* Report the event. */
634 return 0;
635 }
636
637 if (debug_threads)
638 debug_printf ("HEW: Got clone event "
639 "from LWP %ld, new child is LWP %ld\n",
640 lwpid_of (event_thr), new_pid);
641
642 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
643 new_lwp = add_lwp (ptid);
644
645 /* Either we're going to immediately resume the new thread
646 or leave it stopped. resume_one_lwp is a nop if it
647 thinks the thread is currently running, so set this first
648 before calling resume_one_lwp. */
649 new_lwp->stopped = 1;
650
651 /* If we're suspending all threads, leave this one suspended
652 too. If the fork/clone parent is stepping over a breakpoint,
653 all other threads have been suspended already. Leave the
654 child suspended too. */
655 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
656 || event_lwp->bp_reinsert != 0)
657 new_lwp->suspended = 1;
658
659 /* Normally we will get the pending SIGSTOP. But in some cases
660 we might get another signal delivered to the group first.
661 If we do get another signal, be sure not to lose it. */
662 if (WSTOPSIG (status) != SIGSTOP)
663 {
664 new_lwp->stop_expected = 1;
665 new_lwp->status_pending_p = 1;
666 new_lwp->status_pending = status;
667 }
668 else if (cs.report_thread_events)
669 {
670 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
671 new_lwp->status_pending_p = 1;
672 new_lwp->status_pending = status;
673 }
674
675 #ifdef USE_THREAD_DB
676 thread_db_notice_clone (event_thr, ptid);
677 #endif
678
679 /* Don't report the event. */
680 return 1;
681 }
682 else if (event == PTRACE_EVENT_VFORK_DONE)
683 {
684 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
685
686 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
687 {
688 reinsert_single_step_breakpoints (event_thr);
689
690 gdb_assert (has_single_step_breakpoints (event_thr));
691 }
692
693 /* Report the event. */
694 return 0;
695 }
696 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
697 {
698 struct process_info *proc;
699 std::vector<int> syscalls_to_catch;
700 ptid_t event_ptid;
701 pid_t event_pid;
702
703 if (debug_threads)
704 {
705 debug_printf ("HEW: Got exec event from LWP %ld\n",
706 lwpid_of (event_thr));
707 }
708
709 /* Get the event ptid. */
710 event_ptid = ptid_of (event_thr);
711 event_pid = event_ptid.pid ();
712
713 /* Save the syscall list from the execing process. */
714 proc = get_thread_process (event_thr);
715 syscalls_to_catch = std::move (proc->syscalls_to_catch);
716
717 /* Delete the execing process and all its threads. */
718 mourn (proc);
719 current_thread = NULL;
720
721 /* Create a new process/lwp/thread. */
722 proc = add_linux_process (event_pid, 0);
723 event_lwp = add_lwp (event_ptid);
724 event_thr = get_lwp_thread (event_lwp);
725 gdb_assert (current_thread == event_thr);
726 arch_setup_thread (event_thr);
727
728 /* Set the event status. */
729 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
730 event_lwp->waitstatus.value.execd_pathname
731 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
732
733 /* Mark the exec status as pending. */
734 event_lwp->stopped = 1;
735 event_lwp->status_pending_p = 1;
736 event_lwp->status_pending = wstat;
737 event_thr->last_resume_kind = resume_continue;
738 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
739
740 /* Update syscall state in the new lwp, effectively mid-syscall too. */
741 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
742
743 /* Restore the list to catch. Don't rely on the client, which is free
744 to avoid sending a new list when the architecture doesn't change.
745 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
746 proc->syscalls_to_catch = std::move (syscalls_to_catch);
747
748 /* Report the event. */
749 *orig_event_lwp = event_lwp;
750 return 0;
751 }
752
753 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
754 }
755
756 CORE_ADDR
757 linux_process_target::get_pc (lwp_info *lwp)
758 {
759 struct thread_info *saved_thread;
760 struct regcache *regcache;
761 CORE_ADDR pc;
762
763 if (!low_supports_breakpoints ())
764 return 0;
765
766 saved_thread = current_thread;
767 current_thread = get_lwp_thread (lwp);
768
769 regcache = get_thread_regcache (current_thread, 1);
770 pc = low_get_pc (regcache);
771
772 if (debug_threads)
773 debug_printf ("pc is 0x%lx\n", (long) pc);
774
775 current_thread = saved_thread;
776 return pc;
777 }
778
779 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
780 Fill *SYSNO with the number of the syscall that trapped. */
781
782 static void
783 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
784 {
785 struct thread_info *saved_thread;
786 struct regcache *regcache;
787
788 if (the_low_target.get_syscall_trapinfo == NULL)
789 {
790 /* If we cannot get the syscall trapinfo, report an unknown
791 system call number. */
792 *sysno = UNKNOWN_SYSCALL;
793 return;
794 }
795
796 saved_thread = current_thread;
797 current_thread = get_lwp_thread (lwp);
798
799 regcache = get_thread_regcache (current_thread, 1);
800 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
801
802 if (debug_threads)
803 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
804
805 current_thread = saved_thread;
806 }
807
808 bool
809 linux_process_target::save_stop_reason (lwp_info *lwp)
810 {
811 CORE_ADDR pc;
812 CORE_ADDR sw_breakpoint_pc;
813 struct thread_info *saved_thread;
814 #if USE_SIGTRAP_SIGINFO
815 siginfo_t siginfo;
816 #endif
817
818 if (!low_supports_breakpoints ())
819 return false;
820
821 pc = get_pc (lwp);
822 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
823
824 /* breakpoint_at reads from the current thread. */
825 saved_thread = current_thread;
826 current_thread = get_lwp_thread (lwp);
827
828 #if USE_SIGTRAP_SIGINFO
829 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
830 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
831 {
832 if (siginfo.si_signo == SIGTRAP)
833 {
834 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
835 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
836 {
837 /* The si_code is ambiguous on this arch -- check debug
838 registers. */
839 if (!check_stopped_by_watchpoint (lwp))
840 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
841 }
842 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
843 {
844 /* If we determine the LWP stopped for a SW breakpoint,
845 trust it. Particularly don't check watchpoint
846 registers, because at least on s390, we'd find
847 stopped-by-watchpoint as long as there's a watchpoint
848 set. */
849 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
850 }
851 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
852 {
853 /* This can indicate either a hardware breakpoint or
854 hardware watchpoint. Check debug registers. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
857 }
858 else if (siginfo.si_code == TRAP_TRACE)
859 {
860 /* We may have single stepped an instruction that
861 triggered a watchpoint. In that case, on some
862 architectures (such as x86), instead of TRAP_HWBKPT,
863 si_code indicates TRAP_TRACE, and we need to check
864 the debug registers separately. */
865 if (!check_stopped_by_watchpoint (lwp))
866 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
867 }
868 }
869 }
870 #else
871 /* We may have just stepped a breakpoint instruction. E.g., in
872 non-stop mode, GDB first tells the thread A to step a range, and
873 then the user inserts a breakpoint inside the range. In that
874 case we need to report the breakpoint PC. */
875 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
876 && low_breakpoint_at (sw_breakpoint_pc))
877 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
878
879 if (hardware_breakpoint_inserted_here (pc))
880 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
881
882 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
883 check_stopped_by_watchpoint (lwp);
884 #endif
885
886 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
887 {
888 if (debug_threads)
889 {
890 struct thread_info *thr = get_lwp_thread (lwp);
891
892 debug_printf ("CSBB: %s stopped by software breakpoint\n",
893 target_pid_to_str (ptid_of (thr)));
894 }
895
896 /* Back up the PC if necessary. */
897 if (pc != sw_breakpoint_pc)
898 {
899 struct regcache *regcache
900 = get_thread_regcache (current_thread, 1);
901 low_set_pc (regcache, sw_breakpoint_pc);
902 }
903
904 /* Update this so we record the correct stop PC below. */
905 pc = sw_breakpoint_pc;
906 }
907 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
908 {
909 if (debug_threads)
910 {
911 struct thread_info *thr = get_lwp_thread (lwp);
912
913 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
914 target_pid_to_str (ptid_of (thr)));
915 }
916 }
917 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
918 {
919 if (debug_threads)
920 {
921 struct thread_info *thr = get_lwp_thread (lwp);
922
923 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
924 target_pid_to_str (ptid_of (thr)));
925 }
926 }
927 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
928 {
929 if (debug_threads)
930 {
931 struct thread_info *thr = get_lwp_thread (lwp);
932
933 debug_printf ("CSBB: %s stopped by trace\n",
934 target_pid_to_str (ptid_of (thr)));
935 }
936 }
937
938 lwp->stop_pc = pc;
939 current_thread = saved_thread;
940 return true;
941 }
942
943 lwp_info *
944 linux_process_target::add_lwp (ptid_t ptid)
945 {
946 struct lwp_info *lwp;
947
948 lwp = XCNEW (struct lwp_info);
949
950 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
951
952 lwp->thread = add_thread (ptid, lwp);
953
954 low_new_thread (lwp);
955
956 return lwp;
957 }
958
959 void
960 linux_process_target::low_new_thread (lwp_info *info)
961 {
962 /* Nop. */
963 }
964
965 /* Callback to be used when calling fork_inferior, responsible for
966 actually initiating the tracing of the inferior. */
967
968 static void
969 linux_ptrace_fun ()
970 {
971 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
972 (PTRACE_TYPE_ARG4) 0) < 0)
973 trace_start_error_with_name ("ptrace");
974
975 if (setpgid (0, 0) < 0)
976 trace_start_error_with_name ("setpgid");
977
978 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
979 stdout to stderr so that inferior i/o doesn't corrupt the connection.
980 Also, redirect stdin to /dev/null. */
981 if (remote_connection_is_stdio ())
982 {
983 if (close (0) < 0)
984 trace_start_error_with_name ("close");
985 if (open ("/dev/null", O_RDONLY) < 0)
986 trace_start_error_with_name ("open");
987 if (dup2 (2, 1) < 0)
988 trace_start_error_with_name ("dup2");
989 if (write (2, "stdin/stdout redirected\n",
990 sizeof ("stdin/stdout redirected\n") - 1) < 0)
991 {
992 /* Errors ignored. */;
993 }
994 }
995 }
996
997 /* Start an inferior process and return its pid.
998 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
999 are its arguments. */
1000
1001 int
1002 linux_process_target::create_inferior (const char *program,
1003 const std::vector<char *> &program_args)
1004 {
1005 client_state &cs = get_client_state ();
1006 struct lwp_info *new_lwp;
1007 int pid;
1008 ptid_t ptid;
1009
1010 {
1011 maybe_disable_address_space_randomization restore_personality
1012 (cs.disable_randomization);
1013 std::string str_program_args = stringify_argv (program_args);
1014
1015 pid = fork_inferior (program,
1016 str_program_args.c_str (),
1017 get_environ ()->envp (), linux_ptrace_fun,
1018 NULL, NULL, NULL, NULL);
1019 }
1020
1021 add_linux_process (pid, 0);
1022
1023 ptid = ptid_t (pid, pid, 0);
1024 new_lwp = add_lwp (ptid);
1025 new_lwp->must_set_ptrace_flags = 1;
1026
1027 post_fork_inferior (pid, program);
1028
1029 return pid;
1030 }
1031
1032 /* Implement the post_create_inferior target_ops method. */
1033
1034 void
1035 linux_process_target::post_create_inferior ()
1036 {
1037 struct lwp_info *lwp = get_thread_lwp (current_thread);
1038
1039 low_arch_setup ();
1040
1041 if (lwp->must_set_ptrace_flags)
1042 {
1043 struct process_info *proc = current_process ();
1044 int options = linux_low_ptrace_options (proc->attached);
1045
1046 linux_enable_event_reporting (lwpid_of (current_thread), options);
1047 lwp->must_set_ptrace_flags = 0;
1048 }
1049 }
1050
1051 int
1052 linux_process_target::attach_lwp (ptid_t ptid)
1053 {
1054 struct lwp_info *new_lwp;
1055 int lwpid = ptid.lwp ();
1056
1057 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1058 != 0)
1059 return errno;
1060
1061 new_lwp = add_lwp (ptid);
1062
1063 /* We need to wait for SIGSTOP before being able to make the next
1064 ptrace call on this LWP. */
1065 new_lwp->must_set_ptrace_flags = 1;
1066
1067 if (linux_proc_pid_is_stopped (lwpid))
1068 {
1069 if (debug_threads)
1070 debug_printf ("Attached to a stopped process\n");
1071
1072 /* The process is definitely stopped. It is in a job control
1073 stop, unless the kernel predates the TASK_STOPPED /
1074 TASK_TRACED distinction, in which case it might be in a
1075 ptrace stop. Make sure it is in a ptrace stop; from there we
1076 can kill it, signal it, et cetera.
1077
1078 First make sure there is a pending SIGSTOP. Since we are
1079 already attached, the process can not transition from stopped
1080 to running without a PTRACE_CONT; so we know this signal will
1081 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1082 probably already in the queue (unless this kernel is old
1083 enough to use TASK_STOPPED for ptrace stops); but since
1084 SIGSTOP is not an RT signal, it can only be queued once. */
1085 kill_lwp (lwpid, SIGSTOP);
1086
1087 /* Finally, resume the stopped process. This will deliver the
1088 SIGSTOP (or a higher priority signal, just like normal
1089 PTRACE_ATTACH), which we'll catch later on. */
1090 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1091 }
1092
1093 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1094 brings it to a halt.
1095
1096 There are several cases to consider here:
1097
1098 1) gdbserver has already attached to the process and is being notified
1099 of a new thread that is being created.
1100 In this case we should ignore that SIGSTOP and resume the
1101 process. This is handled below by setting stop_expected = 1,
1102 and the fact that add_thread sets last_resume_kind ==
1103 resume_continue.
1104
1105 2) This is the first thread (the process thread), and we're attaching
1106 to it via attach_inferior.
1107 In this case we want the process thread to stop.
1108 This is handled by having linux_attach set last_resume_kind ==
1109 resume_stop after we return.
1110
1111 If the pid we are attaching to is also the tgid, we attach to and
1112 stop all the existing threads. Otherwise, we attach to pid and
1113 ignore any other threads in the same group as this pid.
1114
1115 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1116 existing threads.
1117 In this case we want the thread to stop.
1118 FIXME: This case is currently not properly handled.
1119 We should wait for the SIGSTOP but don't. Things apparently work
1120 because enough time passes between when we ptrace (ATTACH) and when
1121 gdb makes the next ptrace call on the thread.
1122
1123 On the other hand, if we are currently trying to stop all threads, we
1124 should treat the new thread as if we had sent it a SIGSTOP. This works
1125 because we are guaranteed that the add_lwp call above added us to the
1126 end of the list, and so the new thread has not yet reached
1127 wait_for_sigstop (but will). */
1128 new_lwp->stop_expected = 1;
1129
1130 return 0;
1131 }
1132
1133 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1134 already attached. Returns true if a new LWP is found, false
1135 otherwise. */
1136
1137 static int
1138 attach_proc_task_lwp_callback (ptid_t ptid)
1139 {
1140 /* Is this a new thread? */
1141 if (find_thread_ptid (ptid) == NULL)
1142 {
1143 int lwpid = ptid.lwp ();
1144 int err;
1145
1146 if (debug_threads)
1147 debug_printf ("Found new lwp %d\n", lwpid);
1148
1149 err = the_linux_target->attach_lwp (ptid);
1150
1151 /* Be quiet if we simply raced with the thread exiting. EPERM
1152 is returned if the thread's task still exists, and is marked
1153 as exited or zombie, as well as other conditions, so in that
1154 case, confirm the status in /proc/PID/status. */
1155 if (err == ESRCH
1156 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1157 {
1158 if (debug_threads)
1159 {
1160 debug_printf ("Cannot attach to lwp %d: "
1161 "thread is gone (%d: %s)\n",
1162 lwpid, err, safe_strerror (err));
1163 }
1164 }
1165 else if (err != 0)
1166 {
1167 std::string reason
1168 = linux_ptrace_attach_fail_reason_string (ptid, err);
1169
1170 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1171 }
1172
1173 return 1;
1174 }
1175 return 0;
1176 }
1177
1178 static void async_file_mark (void);
1179
1180 /* Attach to PID. If PID is the tgid, attach to it and all
1181 of its threads. */
1182
1183 int
1184 linux_process_target::attach (unsigned long pid)
1185 {
1186 struct process_info *proc;
1187 struct thread_info *initial_thread;
1188 ptid_t ptid = ptid_t (pid, pid, 0);
1189 int err;
1190
1191 proc = add_linux_process (pid, 1);
1192
1193 /* Attach to PID. We will check for other threads
1194 soon. */
1195 err = attach_lwp (ptid);
1196 if (err != 0)
1197 {
1198 remove_process (proc);
1199
1200 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1201 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1202 }
1203
1204 /* Don't ignore the initial SIGSTOP if we just attached to this
1205 process. It will be collected by wait shortly. */
1206 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1207 initial_thread->last_resume_kind = resume_stop;
1208
1209 /* We must attach to every LWP. If /proc is mounted, use that to
1210 find them now. On the one hand, the inferior may be using raw
1211 clone instead of using pthreads. On the other hand, even if it
1212 is using pthreads, GDB may not be connected yet (thread_db needs
1213 to do symbol lookups, through qSymbol). Also, thread_db walks
1214 structures in the inferior's address space to find the list of
1215 threads/LWPs, and those structures may well be corrupted. Note
1216 that once thread_db is loaded, we'll still use it to list threads
1217 and associate pthread info with each LWP. */
1218 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1219
1220 /* GDB will shortly read the xml target description for this
1221 process, to figure out the process' architecture. But the target
1222 description is only filled in when the first process/thread in
1223 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1224 that now, otherwise, if GDB is fast enough, it could read the
1225 target description _before_ that initial stop. */
1226 if (non_stop)
1227 {
1228 struct lwp_info *lwp;
1229 int wstat, lwpid;
1230 ptid_t pid_ptid = ptid_t (pid);
1231
1232 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1233 gdb_assert (lwpid > 0);
1234
1235 lwp = find_lwp_pid (ptid_t (lwpid));
1236
1237 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1238 {
1239 lwp->status_pending_p = 1;
1240 lwp->status_pending = wstat;
1241 }
1242
1243 initial_thread->last_resume_kind = resume_continue;
1244
1245 async_file_mark ();
1246
1247 gdb_assert (proc->tdesc != NULL);
1248 }
1249
1250 return 0;
1251 }
1252
1253 static int
1254 last_thread_of_process_p (int pid)
1255 {
1256 bool seen_one = false;
1257
1258 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1259 {
1260 if (!seen_one)
1261 {
1262 /* This is the first thread of this process we see. */
1263 seen_one = true;
1264 return false;
1265 }
1266 else
1267 {
1268 /* This is the second thread of this process we see. */
1269 return true;
1270 }
1271 });
1272
1273 return thread == NULL;
1274 }
1275
1276 /* Kill LWP. */
1277
1278 static void
1279 linux_kill_one_lwp (struct lwp_info *lwp)
1280 {
1281 struct thread_info *thr = get_lwp_thread (lwp);
1282 int pid = lwpid_of (thr);
1283
1284 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1285 there is no signal context, and ptrace(PTRACE_KILL) (or
1286 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1287 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1288 alternative is to kill with SIGKILL. We only need one SIGKILL
1289 per process, not one for each thread. But since we still
1290 support debugging programs using raw clone without CLONE_THREAD,
1291 we send one for each thread. For years, we used PTRACE_KILL
1292 only, so we're being a bit paranoid about some old kernels where
1293 PTRACE_KILL might work better (dubious if there are any such, but
1294 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1295 second, and so we're fine everywhere. */
1296
1297 errno = 0;
1298 kill_lwp (pid, SIGKILL);
1299 if (debug_threads)
1300 {
1301 int save_errno = errno;
1302
1303 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1304 target_pid_to_str (ptid_of (thr)),
1305 save_errno ? safe_strerror (save_errno) : "OK");
1306 }
1307
1308 errno = 0;
1309 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1310 if (debug_threads)
1311 {
1312 int save_errno = errno;
1313
1314 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1315 target_pid_to_str (ptid_of (thr)),
1316 save_errno ? safe_strerror (save_errno) : "OK");
1317 }
1318 }
1319
1320 /* Kill LWP and wait for it to die. */
1321
1322 static void
1323 kill_wait_lwp (struct lwp_info *lwp)
1324 {
1325 struct thread_info *thr = get_lwp_thread (lwp);
1326 int pid = ptid_of (thr).pid ();
1327 int lwpid = ptid_of (thr).lwp ();
1328 int wstat;
1329 int res;
1330
1331 if (debug_threads)
1332 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1333
1334 do
1335 {
1336 linux_kill_one_lwp (lwp);
1337
1338 /* Make sure it died. Notes:
1339
1340 - The loop is most likely unnecessary.
1341
1342 - We don't use wait_for_event as that could delete lwps
1343 while we're iterating over them. We're not interested in
1344 any pending status at this point, only in making sure all
1345 wait status on the kernel side are collected until the
1346 process is reaped.
1347
1348 - We don't use __WALL here as the __WALL emulation relies on
1349 SIGCHLD, and killing a stopped process doesn't generate
1350 one, nor an exit status.
1351 */
1352 res = my_waitpid (lwpid, &wstat, 0);
1353 if (res == -1 && errno == ECHILD)
1354 res = my_waitpid (lwpid, &wstat, __WCLONE);
1355 } while (res > 0 && WIFSTOPPED (wstat));
1356
1357 /* Even if it was stopped, the child may have already disappeared.
1358 E.g., if it was killed by SIGKILL. */
1359 if (res < 0 && errno != ECHILD)
1360 perror_with_name ("kill_wait_lwp");
1361 }
1362
1363 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1364 except the leader. */
1365
1366 static void
1367 kill_one_lwp_callback (thread_info *thread, int pid)
1368 {
1369 struct lwp_info *lwp = get_thread_lwp (thread);
1370
1371 /* We avoid killing the first thread here, because of a Linux kernel (at
1372 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1373 the children get a chance to be reaped, it will remain a zombie
1374 forever. */
1375
1376 if (lwpid_of (thread) == pid)
1377 {
1378 if (debug_threads)
1379 debug_printf ("lkop: is last of process %s\n",
1380 target_pid_to_str (thread->id));
1381 return;
1382 }
1383
1384 kill_wait_lwp (lwp);
1385 }
1386
1387 int
1388 linux_process_target::kill (process_info *process)
1389 {
1390 int pid = process->pid;
1391
1392 /* If we're killing a running inferior, make sure it is stopped
1393 first, as PTRACE_KILL will not work otherwise. */
1394 stop_all_lwps (0, NULL);
1395
1396 for_each_thread (pid, [&] (thread_info *thread)
1397 {
1398 kill_one_lwp_callback (thread, pid);
1399 });
1400
1401 /* See the comment in kill_one_lwp_callback. We did not kill the first
1402 thread in the list, so do so now. */
1403 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1404
1405 if (lwp == NULL)
1406 {
1407 if (debug_threads)
1408 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1409 pid);
1410 }
1411 else
1412 kill_wait_lwp (lwp);
1413
1414 mourn (process);
1415
1416 /* Since we presently can only stop all lwps of all processes, we
1417 need to unstop lwps of other processes. */
1418 unstop_all_lwps (0, NULL);
1419 return 0;
1420 }
1421
1422 /* Get pending signal of THREAD, for detaching purposes. This is the
1423 signal the thread last stopped for, which we need to deliver to the
1424 thread when detaching; otherwise it'd be suppressed/lost. */
1425
1426 static int
1427 get_detach_signal (struct thread_info *thread)
1428 {
1429 client_state &cs = get_client_state ();
1430 enum gdb_signal signo = GDB_SIGNAL_0;
1431 int status;
1432 struct lwp_info *lp = get_thread_lwp (thread);
1433
1434 if (lp->status_pending_p)
1435 status = lp->status_pending;
1436 else
1437 {
1438 /* If the thread had been suspended by gdbserver, and it stopped
1439 cleanly, then it'll have stopped with SIGSTOP. But we don't
1440 want to deliver that SIGSTOP. */
1441 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1442 || thread->last_status.value.sig == GDB_SIGNAL_0)
1443 return 0;
1444
1445 /* Otherwise, we may need to deliver the signal we
1446 intercepted. */
1447 status = lp->last_status;
1448 }
1449
1450 if (!WIFSTOPPED (status))
1451 {
1452 if (debug_threads)
1453 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1454 target_pid_to_str (ptid_of (thread)));
1455 return 0;
1456 }
1457
1458 /* Extended wait statuses aren't real SIGTRAPs. */
1459 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1460 {
1461 if (debug_threads)
1462 debug_printf ("GPS: lwp %s had stopped with extended "
1463 "status: no pending signal\n",
1464 target_pid_to_str (ptid_of (thread)));
1465 return 0;
1466 }
1467
1468 signo = gdb_signal_from_host (WSTOPSIG (status));
1469
1470 if (cs.program_signals_p && !cs.program_signals[signo])
1471 {
1472 if (debug_threads)
1473 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1474 target_pid_to_str (ptid_of (thread)),
1475 gdb_signal_to_string (signo));
1476 return 0;
1477 }
1478 else if (!cs.program_signals_p
1479 /* If we have no way to know which signals GDB does not
1480 want to have passed to the program, assume
1481 SIGTRAP/SIGINT, which is GDB's default. */
1482 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1483 {
1484 if (debug_threads)
1485 debug_printf ("GPS: lwp %s had signal %s, "
1486 "but we don't know if we should pass it. "
1487 "Default to not.\n",
1488 target_pid_to_str (ptid_of (thread)),
1489 gdb_signal_to_string (signo));
1490 return 0;
1491 }
1492 else
1493 {
1494 if (debug_threads)
1495 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1496 target_pid_to_str (ptid_of (thread)),
1497 gdb_signal_to_string (signo));
1498
1499 return WSTOPSIG (status);
1500 }
1501 }
1502
1503 void
1504 linux_process_target::detach_one_lwp (lwp_info *lwp)
1505 {
1506 struct thread_info *thread = get_lwp_thread (lwp);
1507 int sig;
1508 int lwpid;
1509
1510 /* If there is a pending SIGSTOP, get rid of it. */
1511 if (lwp->stop_expected)
1512 {
1513 if (debug_threads)
1514 debug_printf ("Sending SIGCONT to %s\n",
1515 target_pid_to_str (ptid_of (thread)));
1516
1517 kill_lwp (lwpid_of (thread), SIGCONT);
1518 lwp->stop_expected = 0;
1519 }
1520
1521 /* Pass on any pending signal for this thread. */
1522 sig = get_detach_signal (thread);
1523
1524 /* Preparing to resume may try to write registers, and fail if the
1525 lwp is zombie. If that happens, ignore the error. We'll handle
1526 it below, when detach fails with ESRCH. */
1527 try
1528 {
1529 /* Flush any pending changes to the process's registers. */
1530 regcache_invalidate_thread (thread);
1531
1532 /* Finally, let it resume. */
1533 low_prepare_to_resume (lwp);
1534 }
1535 catch (const gdb_exception_error &ex)
1536 {
1537 if (!check_ptrace_stopped_lwp_gone (lwp))
1538 throw;
1539 }
1540
1541 lwpid = lwpid_of (thread);
1542 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1543 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1544 {
1545 int save_errno = errno;
1546
1547 /* We know the thread exists, so ESRCH must mean the lwp is
1548 zombie. This can happen if one of the already-detached
1549 threads exits the whole thread group. In that case we're
1550 still attached, and must reap the lwp. */
1551 if (save_errno == ESRCH)
1552 {
1553 int ret, status;
1554
1555 ret = my_waitpid (lwpid, &status, __WALL);
1556 if (ret == -1)
1557 {
1558 warning (_("Couldn't reap LWP %d while detaching: %s"),
1559 lwpid, safe_strerror (errno));
1560 }
1561 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1562 {
1563 warning (_("Reaping LWP %d while detaching "
1564 "returned unexpected status 0x%x"),
1565 lwpid, status);
1566 }
1567 }
1568 else
1569 {
1570 error (_("Can't detach %s: %s"),
1571 target_pid_to_str (ptid_of (thread)),
1572 safe_strerror (save_errno));
1573 }
1574 }
1575 else if (debug_threads)
1576 {
1577 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1578 target_pid_to_str (ptid_of (thread)),
1579 strsignal (sig));
1580 }
1581
1582 delete_lwp (lwp);
1583 }
1584
1585 int
1586 linux_process_target::detach (process_info *process)
1587 {
1588 struct lwp_info *main_lwp;
1589
1590 /* If a step-over is already in progress, let it finish first;
1591 otherwise nesting a stabilize_threads operation on top gets
1592 really messy. */
1593 complete_ongoing_step_over ();
1594
1595 /* Stop all threads before detaching. First, ptrace requires that
1596 the thread is stopped to successfully detach. Second, thread_db
1597 may need to uninstall thread event breakpoints from memory, which
1598 only works with a stopped process anyway. */
1599 stop_all_lwps (0, NULL);
1600
1601 #ifdef USE_THREAD_DB
1602 thread_db_detach (process);
1603 #endif
1604
1605 /* Stabilize threads (move out of jump pads). */
1606 target_stabilize_threads ();
1607
1608 /* Detach from the clone lwps first. If the thread group exits just
1609 while we're detaching, we must reap the clone lwps before we're
1610 able to reap the leader. */
1611 for_each_thread (process->pid, [this] (thread_info *thread)
1612 {
1613 /* We don't actually detach from the thread group leader just yet.
1614 If the thread group exits, we must reap the zombie clone lwps
1615 before we're able to reap the leader. */
1616 if (thread->id.pid () == thread->id.lwp ())
1617 return;
1618
1619 lwp_info *lwp = get_thread_lwp (thread);
1620 detach_one_lwp (lwp);
1621 });
1622
1623 main_lwp = find_lwp_pid (ptid_t (process->pid));
1624 detach_one_lwp (main_lwp);
1625
1626 mourn (process);
1627
1628 /* Since we presently can only stop all lwps of all processes, we
1629 need to unstop lwps of other processes. */
1630 unstop_all_lwps (0, NULL);
1631 return 0;
1632 }
1633
1634 /* Remove all LWPs that belong to process PROC from the lwp list. */
1635
1636 void
1637 linux_process_target::mourn (process_info *process)
1638 {
1639 struct process_info_private *priv;
1640
1641 #ifdef USE_THREAD_DB
1642 thread_db_mourn (process);
1643 #endif
1644
1645 for_each_thread (process->pid, [this] (thread_info *thread)
1646 {
1647 delete_lwp (get_thread_lwp (thread));
1648 });
1649
1650 /* Free all private data. */
1651 priv = process->priv;
1652 low_delete_process (priv->arch_private);
1653 free (priv);
1654 process->priv = NULL;
1655
1656 remove_process (process);
1657 }
1658
1659 void
1660 linux_process_target::join (int pid)
1661 {
1662 int status, ret;
1663
1664 do {
1665 ret = my_waitpid (pid, &status, 0);
1666 if (WIFEXITED (status) || WIFSIGNALED (status))
1667 break;
1668 } while (ret != -1 || errno != ECHILD);
1669 }
1670
1671 /* Return true if the given thread is still alive. */
1672
1673 bool
1674 linux_process_target::thread_alive (ptid_t ptid)
1675 {
1676 struct lwp_info *lwp = find_lwp_pid (ptid);
1677
1678 /* We assume we always know if a thread exits. If a whole process
1679 exited but we still haven't been able to report it to GDB, we'll
1680 hold on to the last lwp of the dead process. */
1681 if (lwp != NULL)
1682 return !lwp_is_marked_dead (lwp);
1683 else
1684 return false;
1685 }
1686
1687 bool
1688 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1689 {
1690 struct lwp_info *lp = get_thread_lwp (thread);
1691
1692 if (!lp->status_pending_p)
1693 return 0;
1694
1695 if (thread->last_resume_kind != resume_stop
1696 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1697 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1698 {
1699 struct thread_info *saved_thread;
1700 CORE_ADDR pc;
1701 int discard = 0;
1702
1703 gdb_assert (lp->last_status != 0);
1704
1705 pc = get_pc (lp);
1706
1707 saved_thread = current_thread;
1708 current_thread = thread;
1709
1710 if (pc != lp->stop_pc)
1711 {
1712 if (debug_threads)
1713 debug_printf ("PC of %ld changed\n",
1714 lwpid_of (thread));
1715 discard = 1;
1716 }
1717
1718 #if !USE_SIGTRAP_SIGINFO
1719 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1720 && !low_breakpoint_at (pc))
1721 {
1722 if (debug_threads)
1723 debug_printf ("previous SW breakpoint of %ld gone\n",
1724 lwpid_of (thread));
1725 discard = 1;
1726 }
1727 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1728 && !hardware_breakpoint_inserted_here (pc))
1729 {
1730 if (debug_threads)
1731 debug_printf ("previous HW breakpoint of %ld gone\n",
1732 lwpid_of (thread));
1733 discard = 1;
1734 }
1735 #endif
1736
1737 current_thread = saved_thread;
1738
1739 if (discard)
1740 {
1741 if (debug_threads)
1742 debug_printf ("discarding pending breakpoint status\n");
1743 lp->status_pending_p = 0;
1744 return 0;
1745 }
1746 }
1747
1748 return 1;
1749 }
1750
1751 /* Returns true if LWP is resumed from the client's perspective. */
1752
1753 static int
1754 lwp_resumed (struct lwp_info *lwp)
1755 {
1756 struct thread_info *thread = get_lwp_thread (lwp);
1757
1758 if (thread->last_resume_kind != resume_stop)
1759 return 1;
1760
1761 /* Did gdb send us a `vCont;t', but we haven't reported the
1762 corresponding stop to gdb yet? If so, the thread is still
1763 resumed/running from gdb's perspective. */
1764 if (thread->last_resume_kind == resume_stop
1765 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1766 return 1;
1767
1768 return 0;
1769 }
1770
1771 bool
1772 linux_process_target::status_pending_p_callback (thread_info *thread,
1773 ptid_t ptid)
1774 {
1775 struct lwp_info *lp = get_thread_lwp (thread);
1776
1777 /* Check if we're only interested in events from a specific process
1778 or a specific LWP. */
1779 if (!thread->id.matches (ptid))
1780 return 0;
1781
1782 if (!lwp_resumed (lp))
1783 return 0;
1784
1785 if (lp->status_pending_p
1786 && !thread_still_has_status_pending (thread))
1787 {
1788 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1789 return 0;
1790 }
1791
1792 return lp->status_pending_p;
1793 }
1794
1795 struct lwp_info *
1796 find_lwp_pid (ptid_t ptid)
1797 {
1798 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1799 {
1800 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1801 return thr_arg->id.lwp () == lwp;
1802 });
1803
1804 if (thread == NULL)
1805 return NULL;
1806
1807 return get_thread_lwp (thread);
1808 }
1809
1810 /* Return the number of known LWPs in the tgid given by PID. */
1811
1812 static int
1813 num_lwps (int pid)
1814 {
1815 int count = 0;
1816
1817 for_each_thread (pid, [&] (thread_info *thread)
1818 {
1819 count++;
1820 });
1821
1822 return count;
1823 }
1824
1825 /* See nat/linux-nat.h. */
1826
1827 struct lwp_info *
1828 iterate_over_lwps (ptid_t filter,
1829 gdb::function_view<iterate_over_lwps_ftype> callback)
1830 {
1831 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1832 {
1833 lwp_info *lwp = get_thread_lwp (thr_arg);
1834
1835 return callback (lwp);
1836 });
1837
1838 if (thread == NULL)
1839 return NULL;
1840
1841 return get_thread_lwp (thread);
1842 }
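
/* Example use (hypothetical): find the first stopped LWP of PID.  The
   callback returns nonzero to stop the iteration.  */
#if 0
  lwp_info *stopped_lwp
    = iterate_over_lwps (ptid_t (pid),
			 [] (lwp_info *lwp) { return lwp->stopped; });
#endif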
1843
1844 void
1845 linux_process_target::check_zombie_leaders ()
1846 {
1847 for_each_process ([this] (process_info *proc) {
1848 pid_t leader_pid = pid_of (proc);
1849 struct lwp_info *leader_lp;
1850
1851 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1852
1853 if (debug_threads)
1854 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1855 "num_lwps=%d, zombie=%d\n",
1856 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1857 linux_proc_pid_is_zombie (leader_pid));
1858
1859 if (leader_lp != NULL && !leader_lp->stopped
1860 /* Check if there are other threads in the group, as we may
1861 have raced with the inferior simply exiting. */
1862 && !last_thread_of_process_p (leader_pid)
1863 && linux_proc_pid_is_zombie (leader_pid))
1864 {
1865 /* A leader zombie can mean one of two things:
1866
1867 - It exited, and there's an exit status pending
1868 available, or only the leader exited (not the whole
1869 program). In the latter case, we can't waitpid the
1870 leader's exit status until all other threads are gone.
1871
1872 - There are 3 or more threads in the group, and a thread
1873 other than the leader exec'd. On an exec, the Linux
1874 kernel destroys all other threads (except the execing
1875 one) in the thread group, and resets the execing thread's
1876 tid to the tgid. No exit notification is sent for the
1877 execing thread -- from the ptracer's perspective, it
1878 appears as though the execing thread just vanishes.
1879 Until we reap all other threads except the leader and the
1880 execing thread, the leader will be zombie, and the
1881 execing thread will be in `D (disc sleep)'. As soon as
1882 all other threads are reaped, the execing thread changes
1883 its tid to the tgid, and the previous (zombie) leader
1884 vanishes, giving way to the "new" leader. We could try
1885 distinguishing the exit and exec cases, by waiting once
1886 more, and seeing if something comes out, but it doesn't
1887 sound useful. The previous leader _does_ go away, and
1888 we'll re-add the new one once we see the exec event
1889 (which is just the same as what would happen if the
1890 previous leader did exit voluntarily before some other
1891 thread execs). */
1892
1893 if (debug_threads)
1894 debug_printf ("CZL: Thread group leader %d zombie "
1895 "(it exited, or another thread execd).\n",
1896 leader_pid);
1897
1898 delete_lwp (leader_lp);
1899 }
1900 });
1901 }
1902
1903 /* Callback for `find_thread'.  Returns true if the LWP of THREAD
1904    matches FILTER and is not stopped.  */
1905
1906 static bool
1907 not_stopped_callback (thread_info *thread, ptid_t filter)
1908 {
1909 if (!thread->id.matches (filter))
1910 return false;
1911
1912 lwp_info *lwp = get_thread_lwp (thread);
1913
1914 return !lwp->stopped;
1915 }
1916
1917 /* Increment LWP's suspend count. */
1918
1919 static void
1920 lwp_suspended_inc (struct lwp_info *lwp)
1921 {
1922 lwp->suspended++;
1923
1924 if (debug_threads && lwp->suspended > 4)
1925 {
1926 struct thread_info *thread = get_lwp_thread (lwp);
1927
1928 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1929 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1930 }
1931 }
1932
1933 /* Decrement LWP's suspend count. */
1934
1935 static void
1936 lwp_suspended_decr (struct lwp_info *lwp)
1937 {
1938 lwp->suspended--;
1939
1940 if (lwp->suspended < 0)
1941 {
1942 struct thread_info *thread = get_lwp_thread (lwp);
1943
1944 internal_error (__FILE__, __LINE__,
1945 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1946 lwp->suspended);
1947 }
1948 }
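
/* The suspend count must stay balanced: every lwp_suspended_inc is
   expected to be paired with exactly one lwp_suspended_decr, e.g.
   (minimal sketch, illustrative only):

     lwp_suspended_inc (lwp);
     ...do work during which LWP must not be resumed...
     lwp_suspended_decr (lwp);

   An unpaired decrement drives the count negative and trips the
   internal_error above; see handle_tracepoints below for a real
   pairing.  */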
1949
1950 /* This function should only be called if the LWP got a SIGTRAP.
1951
1952    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1953    event was handled, 0 otherwise. */
1954
1955 static int
1956 handle_tracepoints (struct lwp_info *lwp)
1957 {
1958 struct thread_info *tinfo = get_lwp_thread (lwp);
1959 int tpoint_related_event = 0;
1960
1961 gdb_assert (lwp->suspended == 0);
1962
1963 /* If this tracepoint hit causes a tracing stop, we'll immediately
1964 uninsert tracepoints. To do this, we temporarily pause all
1965 threads, unpatch away, and then unpause threads. We need to make
1966 sure the unpausing doesn't resume LWP too. */
1967 lwp_suspended_inc (lwp);
1968
1969 /* And we need to be sure that any all-threads-stopping doesn't try
1970 to move threads out of the jump pads, as it could deadlock the
1971 inferior (LWP could be in the jump pad, maybe even holding the
1972 lock.) */
1973
1974 /* Do any necessary step collect actions. */
1975 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1976
1977 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1978
1979 /* See if we just hit a tracepoint and do its main collect
1980 actions. */
1981 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1982
1983 lwp_suspended_decr (lwp);
1984
1985 gdb_assert (lwp->suspended == 0);
1986 gdb_assert (!stabilizing_threads
1987 || (lwp->collecting_fast_tracepoint
1988 != fast_tpoint_collect_result::not_collecting));
1989
1990 if (tpoint_related_event)
1991 {
1992 if (debug_threads)
1993 debug_printf ("got a tracepoint event\n");
1994 return 1;
1995 }
1996
1997 return 0;
1998 }
1999
2000 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2001 collection status. */
2002
2003 static fast_tpoint_collect_result
2004 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2005 struct fast_tpoint_collect_status *status)
2006 {
2007 CORE_ADDR thread_area;
2008 struct thread_info *thread = get_lwp_thread (lwp);
2009
2010 if (the_low_target.get_thread_area == NULL)
2011 return fast_tpoint_collect_result::not_collecting;
2012
2013 /* Get the thread area address. This is used to recognize which
2014 thread is which when tracing with the in-process agent library.
2015 We don't read anything from the address, and treat it as opaque;
2016 it's the address itself that we assume is unique per-thread. */
2017 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2018 return fast_tpoint_collect_result::not_collecting;
2019
2020 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2021 }
2022
2023 bool
2024 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2025 {
2026 struct thread_info *saved_thread;
2027
2028 saved_thread = current_thread;
2029 current_thread = get_lwp_thread (lwp);
2030
2031 if ((wstat == NULL
2032 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2033 && supports_fast_tracepoints ()
2034 && agent_loaded_p ())
2035 {
2036 struct fast_tpoint_collect_status status;
2037
2038 if (debug_threads)
2039 debug_printf ("Checking whether LWP %ld needs to move out of the "
2040 "jump pad.\n",
2041 lwpid_of (current_thread));
2042
2043 fast_tpoint_collect_result r
2044 = linux_fast_tracepoint_collecting (lwp, &status);
2045
2046 if (wstat == NULL
2047 || (WSTOPSIG (*wstat) != SIGILL
2048 && WSTOPSIG (*wstat) != SIGFPE
2049 && WSTOPSIG (*wstat) != SIGSEGV
2050 && WSTOPSIG (*wstat) != SIGBUS))
2051 {
2052 lwp->collecting_fast_tracepoint = r;
2053
2054 if (r != fast_tpoint_collect_result::not_collecting)
2055 {
2056 if (r == fast_tpoint_collect_result::before_insn
2057 && lwp->exit_jump_pad_bkpt == NULL)
2058 {
2059 /* Haven't executed the original instruction yet.
2060 Set breakpoint there, and wait till it's hit,
2061 then single-step until exiting the jump pad. */
2062 lwp->exit_jump_pad_bkpt
2063 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2064 }
2065
2066 if (debug_threads)
2067 debug_printf ("Checking whether LWP %ld needs to move out of "
2068 "the jump pad...it does\n",
2069 lwpid_of (current_thread));
2070 current_thread = saved_thread;
2071
2072 return true;
2073 }
2074 }
2075 else
2076 {
2077 /* If we get a synchronous signal while collecting, *and*
2078 while executing the (relocated) original instruction,
2079 reset the PC to point at the tpoint address, before
2080 reporting to GDB. Otherwise, it's an IPA lib bug: just
2081 report the signal to GDB, and pray for the best. */
2082
2083 lwp->collecting_fast_tracepoint
2084 = fast_tpoint_collect_result::not_collecting;
2085
2086 if (r != fast_tpoint_collect_result::not_collecting
2087 && (status.adjusted_insn_addr <= lwp->stop_pc
2088 && lwp->stop_pc < status.adjusted_insn_addr_end))
2089 {
2090 siginfo_t info;
2091 struct regcache *regcache;
2092
2093 /* The si_addr on a few signals references the address
2094 of the faulting instruction. Adjust that as
2095 well. */
2096 if ((WSTOPSIG (*wstat) == SIGILL
2097 || WSTOPSIG (*wstat) == SIGFPE
2098 || WSTOPSIG (*wstat) == SIGBUS
2099 || WSTOPSIG (*wstat) == SIGSEGV)
2100 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2101 (PTRACE_TYPE_ARG3) 0, &info) == 0
2102 /* Final check just to make sure we don't clobber
2103 the siginfo of non-kernel-sent signals. */
2104 && (uintptr_t) info.si_addr == lwp->stop_pc)
2105 {
2106 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2107 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2108 (PTRACE_TYPE_ARG3) 0, &info);
2109 }
2110
2111 regcache = get_thread_regcache (current_thread, 1);
2112 low_set_pc (regcache, status.tpoint_addr);
2113 lwp->stop_pc = status.tpoint_addr;
2114
2115 /* Cancel any fast tracepoint lock this thread was
2116 holding. */
2117 force_unlock_trace_buffer ();
2118 }
2119
2120 if (lwp->exit_jump_pad_bkpt != NULL)
2121 {
2122 if (debug_threads)
2123 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2124 "stopping all threads momentarily.\n");
2125
2126 stop_all_lwps (1, lwp);
2127
2128 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2129 lwp->exit_jump_pad_bkpt = NULL;
2130
2131 unstop_all_lwps (1, lwp);
2132
2133 gdb_assert (lwp->suspended >= 0);
2134 }
2135 }
2136 }
2137
2138 if (debug_threads)
2139 debug_printf ("Checking whether LWP %ld needs to move out of the "
2140 "jump pad...no\n",
2141 lwpid_of (current_thread));
2142
2143 current_thread = saved_thread;
2144 return false;
2145 }
2146
2147 /* Enqueue one signal in the "signals to report later when out of the
2148 jump pad" list. */
2149
2150 static void
2151 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2152 {
2153 struct pending_signals *p_sig;
2154 struct thread_info *thread = get_lwp_thread (lwp);
2155
2156 if (debug_threads)
2157 debug_printf ("Deferring signal %d for LWP %ld.\n",
2158 WSTOPSIG (*wstat), lwpid_of (thread));
2159
2160 if (debug_threads)
2161 {
2162 struct pending_signals *sig;
2163
2164 for (sig = lwp->pending_signals_to_report;
2165 sig != NULL;
2166 sig = sig->prev)
2167 debug_printf (" Already queued %d\n",
2168 sig->signal);
2169
2170 debug_printf (" (no more currently queued signals)\n");
2171 }
2172
2173 /* Don't enqueue non-RT signals if they are already in the deferred
2174      queue.  (SIGSTOP is the easiest signal to see ending up here
2175      twice.)  */
2176 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2177 {
2178 struct pending_signals *sig;
2179
2180 for (sig = lwp->pending_signals_to_report;
2181 sig != NULL;
2182 sig = sig->prev)
2183 {
2184 if (sig->signal == WSTOPSIG (*wstat))
2185 {
2186 if (debug_threads)
2187 debug_printf ("Not requeuing already queued non-RT signal %d"
2188 " for LWP %ld\n",
2189 sig->signal,
2190 lwpid_of (thread));
2191 return;
2192 }
2193 }
2194 }
2195
2196 p_sig = XCNEW (struct pending_signals);
2197 p_sig->prev = lwp->pending_signals_to_report;
2198 p_sig->signal = WSTOPSIG (*wstat);
2199
2200 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2201 &p_sig->info);
2202
2203 lwp->pending_signals_to_report = p_sig;
2204 }
2205
2206 /* Dequeue one signal from the "signals to report later when out of
2207 the jump pad" list. */
2208
2209 static int
2210 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2211 {
2212 struct thread_info *thread = get_lwp_thread (lwp);
2213
2214 if (lwp->pending_signals_to_report != NULL)
2215 {
2216 struct pending_signals **p_sig;
2217
2218 p_sig = &lwp->pending_signals_to_report;
2219 while ((*p_sig)->prev != NULL)
2220 p_sig = &(*p_sig)->prev;
2221
2222 *wstat = W_STOPCODE ((*p_sig)->signal);
2223 if ((*p_sig)->info.si_signo != 0)
2224 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2225 &(*p_sig)->info);
2226 free (*p_sig);
2227 *p_sig = NULL;
2228
2229 if (debug_threads)
2230 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2231 WSTOPSIG (*wstat), lwpid_of (thread));
2232
2233 if (debug_threads)
2234 {
2235 struct pending_signals *sig;
2236
2237 for (sig = lwp->pending_signals_to_report;
2238 sig != NULL;
2239 sig = sig->prev)
2240 debug_printf (" Still queued %d\n",
2241 sig->signal);
2242
2243 debug_printf (" (no more queued signals)\n");
2244 }
2245
2246 return 1;
2247 }
2248
2249 return 0;
2250 }
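
/* Note the asymmetry between the two functions above:
   enqueue_one_deferred_signal pushes new entries at the head of the
   singly-linked pending_signals_to_report list, while the dequeue
   walks to the tail, so deferred signals are re-reported in FIFO
   order.  Illustrative only: deferring SIGUSR1 and then SIGUSR2
   leaves the list as

     SIGUSR2 -> SIGUSR1 -> NULL

   and dequeue_one_deferred_signal reports SIGUSR1 first.  */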
2251
2252 bool
2253 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2254 {
2255 struct thread_info *saved_thread = current_thread;
2256 current_thread = get_lwp_thread (child);
2257
2258 if (low_stopped_by_watchpoint ())
2259 {
2260 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2261 child->stopped_data_address = low_stopped_data_address ();
2262 }
2263
2264 current_thread = saved_thread;
2265
2266 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2267 }
2268
2269 bool
2270 linux_process_target::low_stopped_by_watchpoint ()
2271 {
2272 return false;
2273 }
2274
2275 CORE_ADDR
2276 linux_process_target::low_stopped_data_address ()
2277 {
2278 return 0;
2279 }
2280
2281 /* Return the ptrace options that we want to try to enable. */
2282
2283 static int
2284 linux_low_ptrace_options (int attached)
2285 {
2286 client_state &cs = get_client_state ();
2287 int options = 0;
2288
2289 if (!attached)
2290 options |= PTRACE_O_EXITKILL;
2291
2292 if (cs.report_fork_events)
2293 options |= PTRACE_O_TRACEFORK;
2294
2295 if (cs.report_vfork_events)
2296 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2297
2298 if (cs.report_exec_events)
2299 options |= PTRACE_O_TRACEEXEC;
2300
2301 options |= PTRACE_O_TRACESYSGOOD;
2302
2303 return options;
2304 }
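
/* The options computed above eventually reach the kernel via
   PTRACE_SETOPTIONS (through linux_enable_event_reporting in
   nat/linux-ptrace.c).  A minimal sketch of the underlying call,
   with a hypothetical tracee PID (illustrative only):

     int options = linux_low_ptrace_options (0);
     ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (uintptr_t) options);

   With PTRACE_O_TRACESYSGOOD set, syscall stops report
   SIGTRAP | 0x80 (SYSCALL_SIGTRAP), distinguishing them from
   breakpoint SIGTRAPs.  */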
2305
2306 lwp_info *
2307 linux_process_target::filter_event (int lwpid, int wstat)
2308 {
2309 client_state &cs = get_client_state ();
2310 struct lwp_info *child;
2311 struct thread_info *thread;
2312 int have_stop_pc = 0;
2313
2314 child = find_lwp_pid (ptid_t (lwpid));
2315
2316 /* Check for stop events reported by a process we didn't already
2317 know about - anything not already in our LWP list.
2318
2319 If we're expecting to receive stopped processes after
2320 fork, vfork, and clone events, then we'll just add the
2321 new one to our list and go back to waiting for the event
2322 to be reported - the stopped process might be returned
2323 from waitpid before or after the event is.
2324
2325 But note the case of a non-leader thread exec'ing after the
2326 leader having exited, and gone from our lists (because
2327 check_zombie_leaders deleted it). The non-leader thread
2328 changes its tid to the tgid. */
2329
2330 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2331 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2332 {
2333 ptid_t child_ptid;
2334
2335 /* A multi-thread exec after we had seen the leader exiting. */
2336 if (debug_threads)
2337 {
2338 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2339 			" after exec.\n", lwpid);
2340 }
2341
2342 child_ptid = ptid_t (lwpid, lwpid, 0);
2343 child = add_lwp (child_ptid);
2344 child->stopped = 1;
2345 current_thread = child->thread;
2346 }
2347
2348 /* If we didn't find a process, one of two things presumably happened:
2349 - A process we started and then detached from has exited. Ignore it.
2350 - A process we are controlling has forked and the new child's stop
2351 was reported to us by the kernel. Save its PID. */
2352 if (child == NULL && WIFSTOPPED (wstat))
2353 {
2354 add_to_pid_list (&stopped_pids, lwpid, wstat);
2355 return NULL;
2356 }
2357 else if (child == NULL)
2358 return NULL;
2359
2360 thread = get_lwp_thread (child);
2361
2362 child->stopped = 1;
2363
2364 child->last_status = wstat;
2365
2366 /* Check if the thread has exited. */
2367 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2368 {
2369 if (debug_threads)
2370 debug_printf ("LLFE: %d exited.\n", lwpid);
2371
2372 if (finish_step_over (child))
2373 {
2374 /* Unsuspend all other LWPs, and set them back running again. */
2375 unsuspend_all_lwps (child);
2376 }
2377
2378 /* If there is at least one more LWP, then the exit signal was
2379 not the end of the debugged application and should be
2380 ignored, unless GDB wants to hear about thread exits. */
2381 if (cs.report_thread_events
2382 || last_thread_of_process_p (pid_of (thread)))
2383 {
2384 	  /* Events are serialized to the GDB core, and we can't
2385 	     report this one right now.  Leave the status pending for
2386 the next time we're able to report it. */
2387 mark_lwp_dead (child, wstat);
2388 return child;
2389 }
2390 else
2391 {
2392 delete_lwp (child);
2393 return NULL;
2394 }
2395 }
2396
2397 gdb_assert (WIFSTOPPED (wstat));
2398
2399 if (WIFSTOPPED (wstat))
2400 {
2401 struct process_info *proc;
2402
2403 /* Architecture-specific setup after inferior is running. */
2404 proc = find_process_pid (pid_of (thread));
2405 if (proc->tdesc == NULL)
2406 {
2407 if (proc->attached)
2408 {
2409 /* This needs to happen after we have attached to the
2410 inferior and it is stopped for the first time, but
2411 before we access any inferior registers. */
2412 arch_setup_thread (thread);
2413 }
2414 else
2415 {
2416 /* The process is started, but GDBserver will do
2417 architecture-specific setup after the program stops at
2418 the first instruction. */
2419 child->status_pending_p = 1;
2420 child->status_pending = wstat;
2421 return child;
2422 }
2423 }
2424 }
2425
2426 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2427 {
2428 struct process_info *proc = find_process_pid (pid_of (thread));
2429 int options = linux_low_ptrace_options (proc->attached);
2430
2431 linux_enable_event_reporting (lwpid, options);
2432 child->must_set_ptrace_flags = 0;
2433 }
2434
2435 /* Always update syscall_state, even if it will be filtered later. */
2436 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2437 {
2438 child->syscall_state
2439 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2440 ? TARGET_WAITKIND_SYSCALL_RETURN
2441 : TARGET_WAITKIND_SYSCALL_ENTRY);
2442 }
2443 else
2444 {
2445 /* Almost all other ptrace-stops are known to be outside of system
2446 calls, with further exceptions in handle_extended_wait. */
2447 child->syscall_state = TARGET_WAITKIND_IGNORE;
2448 }
2449
2450 /* Be careful to not overwrite stop_pc until save_stop_reason is
2451 called. */
2452 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2453 && linux_is_extended_waitstatus (wstat))
2454 {
2455 child->stop_pc = get_pc (child);
2456 if (handle_extended_wait (&child, wstat))
2457 {
2458 /* The event has been handled, so just return without
2459 reporting it. */
2460 return NULL;
2461 }
2462 }
2463
2464 if (linux_wstatus_maybe_breakpoint (wstat))
2465 {
2466 if (save_stop_reason (child))
2467 have_stop_pc = 1;
2468 }
2469
2470 if (!have_stop_pc)
2471 child->stop_pc = get_pc (child);
2472
2473 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2474 && child->stop_expected)
2475 {
2476 if (debug_threads)
2477 debug_printf ("Expected stop.\n");
2478 child->stop_expected = 0;
2479
2480 if (thread->last_resume_kind == resume_stop)
2481 {
2482 /* We want to report the stop to the core. Treat the
2483 SIGSTOP as a normal event. */
2484 if (debug_threads)
2485 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2486 target_pid_to_str (ptid_of (thread)));
2487 }
2488 else if (stopping_threads != NOT_STOPPING_THREADS)
2489 {
2490 /* Stopping threads. We don't want this SIGSTOP to end up
2491 pending. */
2492 if (debug_threads)
2493 debug_printf ("LLW: SIGSTOP caught for %s "
2494 "while stopping threads.\n",
2495 target_pid_to_str (ptid_of (thread)));
2496 return NULL;
2497 }
2498 else
2499 {
2500 /* This is a delayed SIGSTOP. Filter out the event. */
2501 if (debug_threads)
2502 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2503 child->stepping ? "step" : "continue",
2504 target_pid_to_str (ptid_of (thread)));
2505
2506 resume_one_lwp (child, child->stepping, 0, NULL);
2507 return NULL;
2508 }
2509 }
2510
2511 child->status_pending_p = 1;
2512 child->status_pending = wstat;
2513 return child;
2514 }
2515
2516 /* Return true if THREAD is doing hardware single step. */
2517
2518 static int
2519 maybe_hw_step (struct thread_info *thread)
2520 {
2521 if (can_hardware_single_step ())
2522 return 1;
2523 else
2524 {
2525 /* GDBserver must insert single-step breakpoint for software
2526 single step. */
2527 gdb_assert (has_single_step_breakpoints (thread));
2528 return 0;
2529 }
2530 }
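
/* In other words: on targets with hardware single step, a
   resume_step request maps to PTRACE_SINGLESTEP; on software
   single-step targets, gdbserver instead plants single-step
   breakpoints at the possible next PCs and resumes with PTRACE_CONT,
   so "stepping" there is really continuing to a breakpoint.  */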
2531
2532 void
2533 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2534 {
2535 struct lwp_info *lp = get_thread_lwp (thread);
2536
2537 if (lp->stopped
2538 && !lp->suspended
2539 && !lp->status_pending_p
2540 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2541 {
2542 int step = 0;
2543
2544 if (thread->last_resume_kind == resume_step)
2545 step = maybe_hw_step (thread);
2546
2547 if (debug_threads)
2548 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2549 target_pid_to_str (ptid_of (thread)),
2550 paddress (lp->stop_pc),
2551 step);
2552
2553 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2554 }
2555 }
2556
2557 int
2558 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2559 ptid_t filter_ptid,
2560 int *wstatp, int options)
2561 {
2562 struct thread_info *event_thread;
2563 struct lwp_info *event_child, *requested_child;
2564 sigset_t block_mask, prev_mask;
2565
2566 retry:
2567 /* N.B. event_thread points to the thread_info struct that contains
2568 event_child. Keep them in sync. */
2569 event_thread = NULL;
2570 event_child = NULL;
2571 requested_child = NULL;
2572
2573 /* Check for a lwp with a pending status. */
2574
2575 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2576 {
2577 event_thread = find_thread_in_random ([&] (thread_info *thread)
2578 {
2579 return status_pending_p_callback (thread, filter_ptid);
2580 });
2581
2582 if (event_thread != NULL)
2583 event_child = get_thread_lwp (event_thread);
2584 if (debug_threads && event_thread)
2585 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2586 }
2587 else if (filter_ptid != null_ptid)
2588 {
2589 requested_child = find_lwp_pid (filter_ptid);
2590
2591 if (stopping_threads == NOT_STOPPING_THREADS
2592 && requested_child->status_pending_p
2593 && (requested_child->collecting_fast_tracepoint
2594 != fast_tpoint_collect_result::not_collecting))
2595 {
2596 enqueue_one_deferred_signal (requested_child,
2597 &requested_child->status_pending);
2598 requested_child->status_pending_p = 0;
2599 requested_child->status_pending = 0;
2600 resume_one_lwp (requested_child, 0, 0, NULL);
2601 }
2602
2603 if (requested_child->suspended
2604 && requested_child->status_pending_p)
2605 {
2606 internal_error (__FILE__, __LINE__,
2607 "requesting an event out of a"
2608 " suspended child?");
2609 }
2610
2611 if (requested_child->status_pending_p)
2612 {
2613 event_child = requested_child;
2614 event_thread = get_lwp_thread (event_child);
2615 }
2616 }
2617
2618 if (event_child != NULL)
2619 {
2620 if (debug_threads)
2621 debug_printf ("Got an event from pending child %ld (%04x)\n",
2622 lwpid_of (event_thread), event_child->status_pending);
2623 *wstatp = event_child->status_pending;
2624 event_child->status_pending_p = 0;
2625 event_child->status_pending = 0;
2626 current_thread = event_thread;
2627 return lwpid_of (event_thread);
2628 }
2629
2630 /* But if we don't find a pending event, we'll have to wait.
2631
2632 We only enter this loop if no process has a pending wait status.
2633 Thus any action taken in response to a wait status inside this
2634 loop is responding as soon as we detect the status, not after any
2635 pending events. */
2636
2637 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2638 all signals while here. */
2639 sigfillset (&block_mask);
2640 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2641
2642 /* Always pull all events out of the kernel. We'll randomly select
2643 an event LWP out of all that have events, to prevent
2644 starvation. */
2645 while (event_child == NULL)
2646 {
2647 pid_t ret = 0;
2648
2649       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2650 quirks:
2651
2652 - If the thread group leader exits while other threads in the
2653 thread group still exist, waitpid(TGID, ...) hangs. That
2654 waitpid won't return an exit status until the other threads
2655 in the group are reaped.
2656
2657 - When a non-leader thread execs, that thread just vanishes
2658 without reporting an exit (so we'd hang if we waited for it
2659 explicitly in that case). The exec event is reported to
2660 the TGID pid. */
2661 errno = 0;
2662 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2663
2664 if (debug_threads)
2665 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2666 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2667
2668 if (ret > 0)
2669 {
2670 if (debug_threads)
2671 {
2672 debug_printf ("LLW: waitpid %ld received %s\n",
2673 (long) ret, status_to_str (*wstatp));
2674 }
2675
2676 /* Filter all events. IOW, leave all events pending. We'll
2677 randomly select an event LWP out of all that have events
2678 below. */
2679 filter_event (ret, *wstatp);
2680 /* Retry until nothing comes out of waitpid. A single
2681 SIGCHLD can indicate more than one child stopped. */
2682 continue;
2683 }
2684
2685 /* Now that we've pulled all events out of the kernel, resume
2686 LWPs that don't have an interesting event to report. */
2687 if (stopping_threads == NOT_STOPPING_THREADS)
2688 for_each_thread ([this] (thread_info *thread)
2689 {
2690 resume_stopped_resumed_lwps (thread);
2691 });
2692
2693 /* ... and find an LWP with a status to report to the core, if
2694 any. */
2695 event_thread = find_thread_in_random ([&] (thread_info *thread)
2696 {
2697 return status_pending_p_callback (thread, filter_ptid);
2698 });
2699
2700 if (event_thread != NULL)
2701 {
2702 event_child = get_thread_lwp (event_thread);
2703 *wstatp = event_child->status_pending;
2704 event_child->status_pending_p = 0;
2705 event_child->status_pending = 0;
2706 break;
2707 }
2708
2709 /* Check for zombie thread group leaders. Those can't be reaped
2710 until all other threads in the thread group are. */
2711 check_zombie_leaders ();
2712
2713 auto not_stopped = [&] (thread_info *thread)
2714 {
2715 return not_stopped_callback (thread, wait_ptid);
2716 };
2717
2718 /* If there are no resumed children left in the set of LWPs we
2719 want to wait for, bail. We can't just block in
2720 waitpid/sigsuspend, because lwps might have been left stopped
2721 in trace-stop state, and we'd be stuck forever waiting for
2722 their status to change (which would only happen if we resumed
2723 them). Even if WNOHANG is set, this return code is preferred
2724 over 0 (below), as it is more detailed. */
2725 if (find_thread (not_stopped) == NULL)
2726 {
2727 if (debug_threads)
2728 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2729 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2730 return -1;
2731 }
2732
2733 /* No interesting event to report to the caller. */
2734 if ((options & WNOHANG))
2735 {
2736 if (debug_threads)
2737 debug_printf ("WNOHANG set, no event found\n");
2738
2739 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2740 return 0;
2741 }
2742
2743 /* Block until we get an event reported with SIGCHLD. */
2744 if (debug_threads)
2745 debug_printf ("sigsuspend'ing\n");
2746
2747 sigsuspend (&prev_mask);
2748 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2749 goto retry;
2750 }
2751
2752 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2753
2754 current_thread = event_thread;
2755
2756 return lwpid_of (event_thread);
2757 }
2758
2759 int
2760 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2761 {
2762 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2763 }
2764
2765 /* Select one LWP out of those that have events pending. */
2766
2767 static void
2768 select_event_lwp (struct lwp_info **orig_lp)
2769 {
2770 struct thread_info *event_thread = NULL;
2771
2772 /* In all-stop, give preference to the LWP that is being
2773 single-stepped. There will be at most one, and it's the LWP that
2774 the core is most interested in. If we didn't do this, then we'd
2775 have to handle pending step SIGTRAPs somehow in case the core
2776 later continues the previously-stepped thread, otherwise we'd
2777 report the pending SIGTRAP, and the core, not having stepped the
2778 thread, wouldn't understand what the trap was for, and therefore
2779 would report it to the user as a random signal. */
2780 if (!non_stop)
2781 {
2782 event_thread = find_thread ([] (thread_info *thread)
2783 {
2784 lwp_info *lp = get_thread_lwp (thread);
2785
2786 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2787 && thread->last_resume_kind == resume_step
2788 && lp->status_pending_p);
2789 });
2790
2791 if (event_thread != NULL)
2792 {
2793 if (debug_threads)
2794 debug_printf ("SEL: Select single-step %s\n",
2795 target_pid_to_str (ptid_of (event_thread)));
2796 }
2797 }
2798 if (event_thread == NULL)
2799 {
2800 /* No single-stepping LWP. Select one at random, out of those
2801 which have had events. */
2802
2803 event_thread = find_thread_in_random ([&] (thread_info *thread)
2804 {
2805 lwp_info *lp = get_thread_lwp (thread);
2806
2807 /* Only resumed LWPs that have an event pending. */
2808 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2809 && lp->status_pending_p);
2810 });
2811 }
2812
2813 if (event_thread != NULL)
2814 {
2815 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2816
2817 /* Switch the event LWP. */
2818 *orig_lp = event_lp;
2819 }
2820 }
2821
2822 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2823    non-NULL.  */
2824
2825 static void
2826 unsuspend_all_lwps (struct lwp_info *except)
2827 {
2828 for_each_thread ([&] (thread_info *thread)
2829 {
2830 lwp_info *lwp = get_thread_lwp (thread);
2831
2832 if (lwp != except)
2833 lwp_suspended_decr (lwp);
2834 });
2835 }
2836
2837 static bool stuck_in_jump_pad_callback (thread_info *thread);
2838 static bool lwp_running (thread_info *thread);
2839
2840 /* Stabilize threads (move out of jump pads).
2841
2842 If a thread is midway collecting a fast tracepoint, we need to
2843 finish the collection and move it out of the jump pad before
2844 reporting the signal.
2845
2846 This avoids recursion while collecting (when a signal arrives
2847 midway, and the signal handler itself collects), which would trash
2848 the trace buffer. In case the user set a breakpoint in a signal
2849    handler, this avoids the backtrace showing the jump pad, etc.
2850    Most importantly, there are certain things we can't do safely if
2851    threads are stopped in a jump pad (or in its callees).  For
2852 example:
2853
2854    - starting a new trace run.  A thread still collecting the
2855    previous run could trash the trace buffer when resumed.  The trace
2856    buffer control structures would have been reset but the thread had
2857    no way to tell.  The thread could even be midway through
2858    memcpy'ing to the buffer, which would mean that when resumed, it
2859    would clobber the trace buffer that had been set up for the new run.
2860
2861 - we can't rewrite/reuse the jump pads for new tracepoints
2862 safely. Say you do tstart while a thread is stopped midway while
2863 collecting. When the thread is later resumed, it finishes the
2864 collection, and returns to the jump pad, to execute the original
2865 instruction that was under the tracepoint jump at the time the
2866 older run had been started. If the jump pad had been rewritten
2867 since for something else in the new run, the thread would now
2868 execute the wrong / random instructions. */
2869
2870 void
2871 linux_process_target::stabilize_threads ()
2872 {
2873 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2874
2875 if (thread_stuck != NULL)
2876 {
2877 if (debug_threads)
2878 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2879 lwpid_of (thread_stuck));
2880 return;
2881 }
2882
2883 thread_info *saved_thread = current_thread;
2884
2885 stabilizing_threads = 1;
2886
2887 /* Kick 'em all. */
2888 for_each_thread ([this] (thread_info *thread)
2889 {
2890 move_out_of_jump_pad (thread);
2891 });
2892
2893 /* Loop until all are stopped out of the jump pads. */
2894 while (find_thread (lwp_running) != NULL)
2895 {
2896 struct target_waitstatus ourstatus;
2897 struct lwp_info *lwp;
2898 int wstat;
2899
2900       /* Note that we go through the full wait event loop.  While
2901 moving threads out of jump pad, we need to be able to step
2902 over internal breakpoints and such. */
2903 wait_1 (minus_one_ptid, &ourstatus, 0);
2904
2905 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2906 {
2907 lwp = get_thread_lwp (current_thread);
2908
2909 /* Lock it. */
2910 lwp_suspended_inc (lwp);
2911
2912 if (ourstatus.value.sig != GDB_SIGNAL_0
2913 || current_thread->last_resume_kind == resume_stop)
2914 {
2915 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2916 enqueue_one_deferred_signal (lwp, &wstat);
2917 }
2918 }
2919 }
2920
2921 unsuspend_all_lwps (NULL);
2922
2923 stabilizing_threads = 0;
2924
2925 current_thread = saved_thread;
2926
2927 if (debug_threads)
2928 {
2929 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2930
2931 if (thread_stuck != NULL)
2932 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2933 lwpid_of (thread_stuck));
2934 }
2935 }
2936
2937 /* Convenience function that is called when the kernel reports an
2938 event that is not passed out to GDB. */
2939
2940 static ptid_t
2941 ignore_event (struct target_waitstatus *ourstatus)
2942 {
2943 /* If we got an event, there may still be others, as a single
2944 SIGCHLD can indicate more than one child stopped. This forces
2945 another target_wait call. */
2946 async_file_mark ();
2947
2948 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2949 return null_ptid;
2950 }
2951
2952 ptid_t
2953 linux_process_target::filter_exit_event (lwp_info *event_child,
2954 target_waitstatus *ourstatus)
2955 {
2956 client_state &cs = get_client_state ();
2957 struct thread_info *thread = get_lwp_thread (event_child);
2958 ptid_t ptid = ptid_of (thread);
2959
2960 if (!last_thread_of_process_p (pid_of (thread)))
2961 {
2962 if (cs.report_thread_events)
2963 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2964 else
2965 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2966
2967 delete_lwp (event_child);
2968 }
2969 return ptid;
2970 }
2971
2972 /* Returns 1 if GDB is interested in any event_child syscalls. */
2973
2974 static int
2975 gdb_catching_syscalls_p (struct lwp_info *event_child)
2976 {
2977 struct thread_info *thread = get_lwp_thread (event_child);
2978 struct process_info *proc = get_thread_process (thread);
2979
2980 return !proc->syscalls_to_catch.empty ();
2981 }
2982
2983 /* Returns 1 if GDB is interested in the event_child syscall.
2984 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
2985
2986 static int
2987 gdb_catch_this_syscall_p (struct lwp_info *event_child)
2988 {
2989 int sysno;
2990 struct thread_info *thread = get_lwp_thread (event_child);
2991 struct process_info *proc = get_thread_process (thread);
2992
2993 if (proc->syscalls_to_catch.empty ())
2994 return 0;
2995
2996 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2997 return 1;
2998
2999 get_syscall_trapinfo (event_child, &sysno);
3000
3001 for (int iter : proc->syscalls_to_catch)
3002 if (iter == sysno)
3003 return 1;
3004
3005 return 0;
3006 }
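
/* GDB fills in proc->syscalls_to_catch via the QCatchSyscalls
   packet; "QCatchSyscalls:1" with no numbers maps to a single
   ANY_SYSCALL entry (tested above), while e.g. "QCatchSyscalls:1;5;e7"
   lists syscall numbers individually (in hex).  Packet syntax per
   the remote protocol documentation; the example numbers are
   arbitrary.  */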
3007
3008 ptid_t
3009 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3010 int target_options)
3011 {
3012 client_state &cs = get_client_state ();
3013 int w;
3014 struct lwp_info *event_child;
3015 int options;
3016 int pid;
3017 int step_over_finished;
3018 int bp_explains_trap;
3019 int maybe_internal_trap;
3020 int report_to_gdb;
3021 int trace_event;
3022 int in_step_range;
3023 int any_resumed;
3024
3025 if (debug_threads)
3026 {
3027 debug_enter ();
3028 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3029 }
3030
3031 /* Translate generic target options into linux options. */
3032 options = __WALL;
3033 if (target_options & TARGET_WNOHANG)
3034 options |= WNOHANG;
3035
3036 bp_explains_trap = 0;
3037 trace_event = 0;
3038 in_step_range = 0;
3039 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3040
3041 auto status_pending_p_any = [&] (thread_info *thread)
3042 {
3043 return status_pending_p_callback (thread, minus_one_ptid);
3044 };
3045
3046 auto not_stopped = [&] (thread_info *thread)
3047 {
3048 return not_stopped_callback (thread, minus_one_ptid);
3049 };
3050
3051 /* Find a resumed LWP, if any. */
3052 if (find_thread (status_pending_p_any) != NULL)
3053 any_resumed = 1;
3054 else if (find_thread (not_stopped) != NULL)
3055 any_resumed = 1;
3056 else
3057 any_resumed = 0;
3058
3059 if (step_over_bkpt == null_ptid)
3060 pid = wait_for_event (ptid, &w, options);
3061 else
3062 {
3063 if (debug_threads)
3064 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3065 target_pid_to_str (step_over_bkpt));
3066 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3067 }
3068
3069 if (pid == 0 || (pid == -1 && !any_resumed))
3070 {
3071 gdb_assert (target_options & TARGET_WNOHANG);
3072
3073 if (debug_threads)
3074 {
3075 debug_printf ("wait_1 ret = null_ptid, "
3076 "TARGET_WAITKIND_IGNORE\n");
3077 debug_exit ();
3078 }
3079
3080 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3081 return null_ptid;
3082 }
3083 else if (pid == -1)
3084 {
3085 if (debug_threads)
3086 {
3087 debug_printf ("wait_1 ret = null_ptid, "
3088 "TARGET_WAITKIND_NO_RESUMED\n");
3089 debug_exit ();
3090 }
3091
3092 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3093 return null_ptid;
3094 }
3095
3096 event_child = get_thread_lwp (current_thread);
3097
3098 /* wait_for_event only returns an exit status for the last
3099 child of a process. Report it. */
3100 if (WIFEXITED (w) || WIFSIGNALED (w))
3101 {
3102 if (WIFEXITED (w))
3103 {
3104 ourstatus->kind = TARGET_WAITKIND_EXITED;
3105 ourstatus->value.integer = WEXITSTATUS (w);
3106
3107 if (debug_threads)
3108 {
3109 debug_printf ("wait_1 ret = %s, exited with "
3110 "retcode %d\n",
3111 target_pid_to_str (ptid_of (current_thread)),
3112 WEXITSTATUS (w));
3113 debug_exit ();
3114 }
3115 }
3116 else
3117 {
3118 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3119 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3120
3121 if (debug_threads)
3122 {
3123 debug_printf ("wait_1 ret = %s, terminated with "
3124 "signal %d\n",
3125 target_pid_to_str (ptid_of (current_thread)),
3126 WTERMSIG (w));
3127 debug_exit ();
3128 }
3129 }
3130
3131 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3132 return filter_exit_event (event_child, ourstatus);
3133
3134 return ptid_of (current_thread);
3135 }
3136
3137   /* If a step-over executes a breakpoint instruction, then with
3138      hardware single step it means a gdb/gdbserver breakpoint had been
3139      planted on top of a permanent breakpoint; with software single
3140      step it may just mean that gdbserver hit the reinsert breakpoint.
3141      The PC has been adjusted by save_stop_reason to point at
3142      the breakpoint address.
3143      So with hardware single step, advance the PC manually past the
3144      breakpoint, and with software single step, advance it only if we
3145      are not hitting the single_step_breakpoint.
3146      This avoids the program trapping the permanent breakpoint
3147      forever. */
3148 if (step_over_bkpt != null_ptid
3149 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3150 && (event_child->stepping
3151 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3152 {
3153 int increment_pc = 0;
3154 int breakpoint_kind = 0;
3155 CORE_ADDR stop_pc = event_child->stop_pc;
3156
3157 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3158 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3159
3160 if (debug_threads)
3161 {
3162 debug_printf ("step-over for %s executed software breakpoint\n",
3163 target_pid_to_str (ptid_of (current_thread)));
3164 }
3165
3166 if (increment_pc != 0)
3167 {
3168 struct regcache *regcache
3169 = get_thread_regcache (current_thread, 1);
3170
3171 event_child->stop_pc += increment_pc;
3172 low_set_pc (regcache, event_child->stop_pc);
3173
3174 if (!low_breakpoint_at (event_child->stop_pc))
3175 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3176 }
3177 }
3178
3179 /* If this event was not handled before, and is not a SIGTRAP, we
3180 report it. SIGILL and SIGSEGV are also treated as traps in case
3181 a breakpoint is inserted at the current PC. If this target does
3182 not support internal breakpoints at all, we also report the
3183 SIGTRAP without further processing; it's of no concern to us. */
3184 maybe_internal_trap
3185 = (low_supports_breakpoints ()
3186 && (WSTOPSIG (w) == SIGTRAP
3187 || ((WSTOPSIG (w) == SIGILL
3188 || WSTOPSIG (w) == SIGSEGV)
3189 && low_breakpoint_at (event_child->stop_pc))));
3190
3191 if (maybe_internal_trap)
3192 {
3193 /* Handle anything that requires bookkeeping before deciding to
3194 report the event or continue waiting. */
3195
3196 /* First check if we can explain the SIGTRAP with an internal
3197 breakpoint, or if we should possibly report the event to GDB.
3198 Do this before anything that may remove or insert a
3199 breakpoint. */
3200 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3201
3202 /* We have a SIGTRAP, possibly a step-over dance has just
3203 finished. If so, tweak the state machine accordingly,
3204 reinsert breakpoints and delete any single-step
3205 breakpoints. */
3206 step_over_finished = finish_step_over (event_child);
3207
3208 /* Now invoke the callbacks of any internal breakpoints there. */
3209 check_breakpoints (event_child->stop_pc);
3210
3211 /* Handle tracepoint data collecting. This may overflow the
3212 trace buffer, and cause a tracing stop, removing
3213 breakpoints. */
3214 trace_event = handle_tracepoints (event_child);
3215
3216 if (bp_explains_trap)
3217 {
3218 if (debug_threads)
3219 debug_printf ("Hit a gdbserver breakpoint.\n");
3220 }
3221 }
3222 else
3223 {
3224 /* We have some other signal, possibly a step-over dance was in
3225 progress, and it should be cancelled too. */
3226 step_over_finished = finish_step_over (event_child);
3227 }
3228
3229 /* We have all the data we need. Either report the event to GDB, or
3230 resume threads and keep waiting for more. */
3231
3232 /* If we're collecting a fast tracepoint, finish the collection and
3233 move out of the jump pad before delivering a signal. See
3234      stabilize_threads.  */
3235
3236 if (WIFSTOPPED (w)
3237 && WSTOPSIG (w) != SIGTRAP
3238 && supports_fast_tracepoints ()
3239 && agent_loaded_p ())
3240 {
3241 if (debug_threads)
3242 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3243 "to defer or adjust it.\n",
3244 WSTOPSIG (w), lwpid_of (current_thread));
3245
3246 /* Allow debugging the jump pad itself. */
3247 if (current_thread->last_resume_kind != resume_step
3248 && maybe_move_out_of_jump_pad (event_child, &w))
3249 {
3250 enqueue_one_deferred_signal (event_child, &w);
3251
3252 if (debug_threads)
3253 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3254 WSTOPSIG (w), lwpid_of (current_thread));
3255
3256 resume_one_lwp (event_child, 0, 0, NULL);
3257
3258 if (debug_threads)
3259 debug_exit ();
3260 return ignore_event (ourstatus);
3261 }
3262 }
3263
3264 if (event_child->collecting_fast_tracepoint
3265 != fast_tpoint_collect_result::not_collecting)
3266 {
3267 if (debug_threads)
3268 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3269 "Check if we're already there.\n",
3270 lwpid_of (current_thread),
3271 (int) event_child->collecting_fast_tracepoint);
3272
3273 trace_event = 1;
3274
3275 event_child->collecting_fast_tracepoint
3276 = linux_fast_tracepoint_collecting (event_child, NULL);
3277
3278 if (event_child->collecting_fast_tracepoint
3279 != fast_tpoint_collect_result::before_insn)
3280 {
3281 /* No longer need this breakpoint. */
3282 if (event_child->exit_jump_pad_bkpt != NULL)
3283 {
3284 if (debug_threads)
3285 		debug_printf ("No longer need exit-jump-pad bkpt; removing it.  "
3286 "stopping all threads momentarily.\n");
3287
3288 /* Other running threads could hit this breakpoint.
3289 We don't handle moribund locations like GDB does,
3290 instead we always pause all threads when removing
3291 breakpoints, so that any step-over or
3292 decr_pc_after_break adjustment is always taken
3293 care of while the breakpoint is still
3294 inserted. */
3295 stop_all_lwps (1, event_child);
3296
3297 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3298 event_child->exit_jump_pad_bkpt = NULL;
3299
3300 unstop_all_lwps (1, event_child);
3301
3302 gdb_assert (event_child->suspended >= 0);
3303 }
3304 }
3305
3306 if (event_child->collecting_fast_tracepoint
3307 == fast_tpoint_collect_result::not_collecting)
3308 {
3309 if (debug_threads)
3310 debug_printf ("fast tracepoint finished "
3311 "collecting successfully.\n");
3312
3313 /* We may have a deferred signal to report. */
3314 if (dequeue_one_deferred_signal (event_child, &w))
3315 {
3316 if (debug_threads)
3317 debug_printf ("dequeued one signal.\n");
3318 }
3319 else
3320 {
3321 if (debug_threads)
3322 debug_printf ("no deferred signals.\n");
3323
3324 if (stabilizing_threads)
3325 {
3326 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3327 ourstatus->value.sig = GDB_SIGNAL_0;
3328
3329 if (debug_threads)
3330 {
3331 debug_printf ("wait_1 ret = %s, stopped "
3332 "while stabilizing threads\n",
3333 target_pid_to_str (ptid_of (current_thread)));
3334 debug_exit ();
3335 }
3336
3337 return ptid_of (current_thread);
3338 }
3339 }
3340 }
3341 }
3342
3343 /* Check whether GDB would be interested in this event. */
3344
3345 /* Check if GDB is interested in this syscall. */
3346 if (WIFSTOPPED (w)
3347 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3348 && !gdb_catch_this_syscall_p (event_child))
3349 {
3350 if (debug_threads)
3351 {
3352 debug_printf ("Ignored syscall for LWP %ld.\n",
3353 lwpid_of (current_thread));
3354 }
3355
3356 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3357
3358 if (debug_threads)
3359 debug_exit ();
3360 return ignore_event (ourstatus);
3361 }
3362
3363 /* If GDB is not interested in this signal, don't stop other
3364 threads, and don't report it to GDB. Just resume the inferior
3365 right away. We do this for threading-related signals as well as
3366 any that GDB specifically requested we ignore. But never ignore
3367 SIGSTOP if we sent it ourselves, and do not ignore signals when
3368 stepping - they may require special handling to skip the signal
3369 handler. Also never ignore signals that could be caused by a
3370 breakpoint. */
3371 if (WIFSTOPPED (w)
3372 && current_thread->last_resume_kind != resume_step
3373 && (
3374 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3375 (current_process ()->priv->thread_db != NULL
3376 && (WSTOPSIG (w) == __SIGRTMIN
3377 || WSTOPSIG (w) == __SIGRTMIN + 1))
3378 ||
3379 #endif
3380 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3381 && !(WSTOPSIG (w) == SIGSTOP
3382 && current_thread->last_resume_kind == resume_stop)
3383 && !linux_wstatus_maybe_breakpoint (w))))
3384 {
3385 siginfo_t info, *info_p;
3386
3387 if (debug_threads)
3388 debug_printf ("Ignored signal %d for LWP %ld.\n",
3389 WSTOPSIG (w), lwpid_of (current_thread));
3390
3391 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3392 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3393 info_p = &info;
3394 else
3395 info_p = NULL;
3396
3397 if (step_over_finished)
3398 {
3399 /* We cancelled this thread's step-over above. We still
3400 need to unsuspend all other LWPs, and set them back
3401 running again while the signal handler runs. */
3402 unsuspend_all_lwps (event_child);
3403
3404 /* Enqueue the pending signal info so that proceed_all_lwps
3405 doesn't lose it. */
3406 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3407
3408 proceed_all_lwps ();
3409 }
3410 else
3411 {
3412 resume_one_lwp (event_child, event_child->stepping,
3413 WSTOPSIG (w), info_p);
3414 }
3415
3416 if (debug_threads)
3417 debug_exit ();
3418
3419 return ignore_event (ourstatus);
3420 }
3421
3422 /* Note that all addresses are always "out of the step range" when
3423 there's no range to begin with. */
3424 in_step_range = lwp_in_step_range (event_child);
3425
3426 /* If GDB wanted this thread to single step, and the thread is out
3427 of the step range, we always want to report the SIGTRAP, and let
3428 GDB handle it. Watchpoints should always be reported. So should
3429 signals we can't explain. A SIGTRAP we can't explain could be a
3430    GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3431    we do, we'll be able to handle GDB breakpoints on top of internal
3432 breakpoints, by handling the internal breakpoint and still
3433 reporting the event to GDB. If we don't, we're out of luck, GDB
3434 won't see the breakpoint hit. If we see a single-step event but
3435 the thread should be continuing, don't pass the trap to gdb.
3436 That indicates that we had previously finished a single-step but
3437 left the single-step pending -- see
3438 complete_ongoing_step_over. */
3439 report_to_gdb = (!maybe_internal_trap
3440 || (current_thread->last_resume_kind == resume_step
3441 && !in_step_range)
3442 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3443 || (!in_step_range
3444 && !bp_explains_trap
3445 && !trace_event
3446 && !step_over_finished
3447 && !(current_thread->last_resume_kind == resume_continue
3448 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3449 || (gdb_breakpoint_here (event_child->stop_pc)
3450 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3451 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3452 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3453
3454 run_breakpoint_commands (event_child->stop_pc);
3455
3456 /* We found no reason GDB would want us to stop. We either hit one
3457 of our own breakpoints, or finished an internal step GDB
3458 shouldn't know about. */
3459 if (!report_to_gdb)
3460 {
3461 if (debug_threads)
3462 {
3463 if (bp_explains_trap)
3464 debug_printf ("Hit a gdbserver breakpoint.\n");
3465 if (step_over_finished)
3466 debug_printf ("Step-over finished.\n");
3467 if (trace_event)
3468 debug_printf ("Tracepoint event.\n");
3469 if (lwp_in_step_range (event_child))
3470 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3471 paddress (event_child->stop_pc),
3472 paddress (event_child->step_range_start),
3473 paddress (event_child->step_range_end));
3474 }
3475
3476 /* We're not reporting this breakpoint to GDB, so apply the
3477 decr_pc_after_break adjustment to the inferior's regcache
3478 ourselves. */
3479
3480 if (low_supports_breakpoints ())
3481 {
3482 struct regcache *regcache
3483 = get_thread_regcache (current_thread, 1);
3484 low_set_pc (regcache, event_child->stop_pc);
3485 }
3486
3487 if (step_over_finished)
3488 {
3489 /* If we have finished stepping over a breakpoint, we've
3490 stopped and suspended all LWPs momentarily except the
3491 stepping one. This is where we resume them all again.
3492 We're going to keep waiting, so use proceed, which
3493 handles stepping over the next breakpoint. */
3494 unsuspend_all_lwps (event_child);
3495 }
3496 else
3497 {
3498 /* Remove the single-step breakpoints if any. Note that
3499 there isn't single-step breakpoint if we finished stepping
3500 over. */
3501 if (supports_software_single_step ()
3502 && has_single_step_breakpoints (current_thread))
3503 {
3504 stop_all_lwps (0, event_child);
3505 delete_single_step_breakpoints (current_thread);
3506 unstop_all_lwps (0, event_child);
3507 }
3508 }
3509
3510 if (debug_threads)
3511 debug_printf ("proceeding all threads.\n");
3512 proceed_all_lwps ();
3513
3514 if (debug_threads)
3515 debug_exit ();
3516
3517 return ignore_event (ourstatus);
3518 }
3519
3520 if (debug_threads)
3521 {
3522 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3523 {
3524 std::string str
3525 = target_waitstatus_to_string (&event_child->waitstatus);
3526
3527 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3528 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3529 }
3530 if (current_thread->last_resume_kind == resume_step)
3531 {
3532 if (event_child->step_range_start == event_child->step_range_end)
3533 debug_printf ("GDB wanted to single-step, reporting event.\n");
3534 else if (!lwp_in_step_range (event_child))
3535 debug_printf ("Out of step range, reporting event.\n");
3536 }
3537 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3538 debug_printf ("Stopped by watchpoint.\n");
3539 else if (gdb_breakpoint_here (event_child->stop_pc))
3540 debug_printf ("Stopped by GDB breakpoint.\n");
3541       /* Printed for every event we are about to report.  */
3542       debug_printf ("Hit a non-gdbserver trap event.\n");
3543 }
3544
3545 /* Alright, we're going to report a stop. */
3546
3547 /* Remove single-step breakpoints. */
3548 if (supports_software_single_step ())
3549 {
3550       /* Decide whether to remove single-step breakpoints.  If we do,
3551 	 stop all lwps, so that other threads won't hit a breakpoint in
3552 	 stale memory.  */
3553 int remove_single_step_breakpoints_p = 0;
3554
3555 if (non_stop)
3556 {
3557 remove_single_step_breakpoints_p
3558 = has_single_step_breakpoints (current_thread);
3559 }
3560 else
3561 {
3562 /* In all-stop, a stop reply cancels all previous resume
3563 requests. Delete all single-step breakpoints. */
3564
3565 find_thread ([&] (thread_info *thread) {
3566 if (has_single_step_breakpoints (thread))
3567 {
3568 remove_single_step_breakpoints_p = 1;
3569 return true;
3570 }
3571
3572 return false;
3573 });
3574 }
3575
3576 if (remove_single_step_breakpoints_p)
3577 {
3578 	  /* If we remove single-step breakpoints from memory, stop all
3579 	     lwps, so that other threads won't hit a breakpoint in stale
3580 	     memory.  */
3581 stop_all_lwps (0, event_child);
3582
3583 if (non_stop)
3584 {
3585 gdb_assert (has_single_step_breakpoints (current_thread));
3586 delete_single_step_breakpoints (current_thread);
3587 }
3588 else
3589 {
3590 for_each_thread ([] (thread_info *thread){
3591 if (has_single_step_breakpoints (thread))
3592 delete_single_step_breakpoints (thread);
3593 });
3594 }
3595
3596 unstop_all_lwps (0, event_child);
3597 }
3598 }
3599
3600 if (!stabilizing_threads)
3601 {
3602 /* In all-stop, stop all threads. */
3603 if (!non_stop)
3604 stop_all_lwps (0, NULL);
3605
3606 if (step_over_finished)
3607 {
3608 if (!non_stop)
3609 {
3610 /* If we were doing a step-over, all other threads but
3611 the stepping one had been paused in start_step_over,
3612 with their suspend counts incremented. We don't want
3613 to do a full unstop/unpause, because we're in
3614 all-stop mode (so we want threads stopped), but we
3615 still need to unsuspend the other threads, to
3616 decrement their `suspended' count back. */
3617 unsuspend_all_lwps (event_child);
3618 }
3619 else
3620 {
3621 /* If we just finished a step-over, then all threads had
3622 been momentarily paused. In all-stop, that's fine,
3623 we want threads stopped by now anyway. In non-stop,
3624 we need to re-resume threads that GDB wanted to be
3625 running. */
3626 unstop_all_lwps (1, event_child);
3627 }
3628 }
3629
3630 /* If we're not waiting for a specific LWP, choose an event LWP
3631 from among those that have had events. Giving equal priority
3632 to all LWPs that have had events helps prevent
3633 starvation. */
3634 if (ptid == minus_one_ptid)
3635 {
3636 event_child->status_pending_p = 1;
3637 event_child->status_pending = w;
3638
3639 select_event_lwp (&event_child);
3640
3641 /* current_thread and event_child must stay in sync. */
3642 current_thread = get_lwp_thread (event_child);
3643
3644 event_child->status_pending_p = 0;
3645 w = event_child->status_pending;
3646 }
3647
3648
3649 /* Stabilize threads (move out of jump pads). */
3650 if (!non_stop)
3651 target_stabilize_threads ();
3652 }
3653 else
3654 {
3655 /* If we just finished a step-over, then all threads had been
3656 momentarily paused. In all-stop, that's fine, we want
3657 threads stopped by now anyway. In non-stop, we need to
3658 re-resume threads that GDB wanted to be running. */
3659 if (step_over_finished)
3660 unstop_all_lwps (1, event_child);
3661 }
3662
3663 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3664 {
3665 /* If the reported event is an exit, fork, vfork or exec, let
3666 GDB know. */
3667
3668 /* Break the unreported fork relationship chain. */
3669 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3670 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3671 {
3672 event_child->fork_relative->fork_relative = NULL;
3673 event_child->fork_relative = NULL;
3674 }
3675
3676 *ourstatus = event_child->waitstatus;
3677 /* Clear the event lwp's waitstatus since we handled it already. */
3678 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3679 }
3680 else
3681 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3682
3683 /* Now that we've selected our final event LWP, un-adjust its PC if
3684 it was a software breakpoint, and the client doesn't know we can
3685 adjust the breakpoint ourselves. */
3686 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3687 && !cs.swbreak_feature)
3688 {
3689 int decr_pc = low_decr_pc_after_break ();
3690
3691 if (decr_pc != 0)
3692 {
3693 struct regcache *regcache
3694 = get_thread_regcache (current_thread, 1);
3695 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3696 }
3697 }
3698
3699 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3700 {
3701 get_syscall_trapinfo (event_child,
3702 &ourstatus->value.syscall_number);
3703 ourstatus->kind = event_child->syscall_state;
3704 }
3705 else if (current_thread->last_resume_kind == resume_stop
3706 && WSTOPSIG (w) == SIGSTOP)
3707 {
3708       /* A thread that GDB requested to stop with vCont;t has stopped
3709 	 cleanly, so report it as SIG0.  The use of SIGSTOP is an
3710 	 implementation detail.  */
3711 ourstatus->value.sig = GDB_SIGNAL_0;
3712 }
3713 else if (current_thread->last_resume_kind == resume_stop
3714 && WSTOPSIG (w) != SIGSTOP)
3715       /* A thread that GDB requested to stop with vCont;t, but which
3716 	 stopped for some other reason.  */
3717 but, it stopped for other reasons. */
3718 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3719 }
3720 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3721 {
3722 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3723 }
3724
3725 gdb_assert (step_over_bkpt == null_ptid);
3726
3727 if (debug_threads)
3728 {
3729 debug_printf ("wait_1 ret = %s, %d, %d\n",
3730 target_pid_to_str (ptid_of (current_thread)),
3731 ourstatus->kind, ourstatus->value.sig);
3732 debug_exit ();
3733 }
3734
3735 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3736 return filter_exit_event (event_child, ourstatus);
3737
3738 return ptid_of (current_thread);
3739 }
3740
3741 /* Get rid of any pending event in the pipe. */
3742 static void
3743 async_file_flush (void)
3744 {
3745 int ret;
3746 char buf;
3747
3748 do
3749 ret = read (linux_event_pipe[0], &buf, 1);
3750 while (ret >= 0 || (ret == -1 && errno == EINTR));
3751 }
3752
3753 /* Put something in the pipe, so the event loop wakes up. */
3754 static void
3755 async_file_mark (void)
3756 {
3757 int ret;
3758
3759 async_file_flush ();
3760
3761 do
3762 ret = write (linux_event_pipe[1], "+", 1);
3763 while (ret == 0 || (ret == -1 && errno == EINTR));
3764
3765 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3766 be awakened anyway. */
3767 }
3768
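/* Illustrative sketch (not part of the original file): the event pipe
   drained and marked above is assumed to be created when async mode is
   enabled, with both ends made non-blocking, roughly like so:

     int fds[2];

     if (pipe (fds) != 0)
       error ("creating event pipe");
     fcntl (fds[0], F_SETFL, O_NONBLOCK);
     fcntl (fds[1], F_SETFL, O_NONBLOCK);
     linux_event_pipe[0] = fds[0];
     linux_event_pipe[1] = fds[1];

   The non-blocking ends are what let async_file_flush drain until read
   fails with EAGAIN, and async_file_mark ignore a full pipe.  */
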
3769 ptid_t
3770 linux_process_target::wait (ptid_t ptid,
3771 target_waitstatus *ourstatus,
3772 int target_options)
3773 {
3774 ptid_t event_ptid;
3775
3776 /* Flush the async file first. */
3777 if (target_is_async_p ())
3778 async_file_flush ();
3779
3780 do
3781 {
3782 event_ptid = wait_1 (ptid, ourstatus, target_options);
3783 }
3784 while ((target_options & TARGET_WNOHANG) == 0
3785 && event_ptid == null_ptid
3786 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3787
3788 /* If at least one stop was reported, there may be more. A single
3789 SIGCHLD can signal more than one child stop. */
3790 if (target_is_async_p ()
3791 && (target_options & TARGET_WNOHANG) != 0
3792 && event_ptid != null_ptid)
3793 async_file_mark ();
3794
3795 return event_ptid;
3796 }
3797
3798 /* Send a signal to an LWP. */
3799
3800 static int
3801 kill_lwp (unsigned long lwpid, int signo)
3802 {
3803 int ret;
3804
3805 errno = 0;
3806 ret = syscall (__NR_tkill, lwpid, signo);
3807 if (errno == ENOSYS)
3808 {
3809 /* If tkill fails, then we are not using nptl threads, a
3810 configuration we no longer support. */
3811 perror_with_name (("tkill"));
3812 }
3813 return ret;
3814 }
3815
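/* Contrast, for illustration: kill (PID, SIG) targets the whole thread
   group and lets the kernel pick any thread to receive SIG, while the
   tkill-based kill_lwp above targets one specific thread:

     kill_lwp (lwpid_of (thread), SIGSTOP);  // stops exactly THREAD
     kill (pid_of (thread), SIGSTOP);        // any thread of the group

   Only the first form is suitable for stopping individual LWPs.  */
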
3816 void
3817 linux_stop_lwp (struct lwp_info *lwp)
3818 {
3819 send_sigstop (lwp);
3820 }
3821
3822 static void
3823 send_sigstop (struct lwp_info *lwp)
3824 {
3825 int pid;
3826
3827 pid = lwpid_of (get_lwp_thread (lwp));
3828
3829 /* If we already have a pending stop signal for this LWP, don't
3830 send another. */
3831 if (lwp->stop_expected)
3832 {
3833 if (debug_threads)
3834 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3835
3836 return;
3837 }
3838
3839 if (debug_threads)
3840 debug_printf ("Sending sigstop to lwp %d\n", pid);
3841
3842 lwp->stop_expected = 1;
3843 kill_lwp (pid, SIGSTOP);
3844 }
3845
3846 static void
3847 send_sigstop (thread_info *thread, lwp_info *except)
3848 {
3849 struct lwp_info *lwp = get_thread_lwp (thread);
3850
3851 /* Ignore EXCEPT. */
3852 if (lwp == except)
3853 return;
3854
3855 if (lwp->stopped)
3856 return;
3857
3858 send_sigstop (lwp);
3859 }
3860
3861 /* Increment the suspend count of an LWP, and stop it, if not stopped
3862 yet. */
3863 static void
3864 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3865 {
3866 struct lwp_info *lwp = get_thread_lwp (thread);
3867
3868 /* Ignore EXCEPT. */
3869 if (lwp == except)
3870 return;
3871
3872 lwp_suspended_inc (lwp);
3873
3874 send_sigstop (thread, except);
3875 }
3876
3877 static void
3878 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3879 {
3880 /* Store the exit status for later. */
3881 lwp->status_pending_p = 1;
3882 lwp->status_pending = wstat;
3883
3884 /* Store in waitstatus as well, as there's nothing else to process
3885 for this event. */
3886 if (WIFEXITED (wstat))
3887 {
3888 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3889 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3890 }
3891 else if (WIFSIGNALED (wstat))
3892 {
3893 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3894 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3895 }
3896
3897 /* Prevent trying to stop it. */
3898 lwp->stopped = 1;
3899
3900 /* No further stops are expected from a dead lwp. */
3901 lwp->stop_expected = 0;
3902 }
3903
3904 /* Return true if LWP has exited already, and has a pending exit event
3905 to report to GDB. */
3906
3907 static int
3908 lwp_is_marked_dead (struct lwp_info *lwp)
3909 {
3910 return (lwp->status_pending_p
3911 && (WIFEXITED (lwp->status_pending)
3912 || WIFSIGNALED (lwp->status_pending)));
3913 }
3914
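/* Decoding examples for the checks above (these follow directly from
   the <sys/wait.h> macros): for a child that called exit (3), WSTAT
   satisfies WIFEXITED and WEXITSTATUS (wstat) == 3; for one killed by
   SIGKILL, WIFSIGNALED holds and WTERMSIG (wstat) == SIGKILL.  A
   ptrace stop satisfies neither -- WIFSTOPPED is true instead -- so
   lwp_is_marked_dead correctly ignores ordinary stops.  */
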
3915 void
3916 linux_process_target::wait_for_sigstop ()
3917 {
3918 struct thread_info *saved_thread;
3919 ptid_t saved_tid;
3920 int wstat;
3921 int ret;
3922
3923 saved_thread = current_thread;
3924 if (saved_thread != NULL)
3925 saved_tid = saved_thread->id;
3926 else
3927 saved_tid = null_ptid; /* avoid bogus unused warning */
3928
3929 if (debug_threads)
3930 debug_printf ("wait_for_sigstop: pulling events\n");
3931
3932 /* Passing NULL_PTID as filter indicates we want all events to be
3933 left pending. Eventually this returns when there are no
3934 unwaited-for children left. */
3935 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3936 gdb_assert (ret == -1);
3937
3938 if (saved_thread == NULL || mythread_alive (saved_tid))
3939 current_thread = saved_thread;
3940 else
3941 {
3942 if (debug_threads)
3943 debug_printf ("Previously current thread died.\n");
3944
3945 /* We can't change the current inferior behind GDB's back,
3946 otherwise, a subsequent command may apply to the wrong
3947 process. */
3948 current_thread = NULL;
3949 }
3950 }
3951
3952 /* Returns true if THREAD is stopped in a jump pad, and we can't
3953 move it out, because we need to report the stop event to GDB. For
3954 example, if the user puts a breakpoint in the jump pad, it's
3955 because she wants to debug it. */
3956
3957 static bool
3958 stuck_in_jump_pad_callback (thread_info *thread)
3959 {
3960 struct lwp_info *lwp = get_thread_lwp (thread);
3961
3962 if (lwp->suspended != 0)
3963 {
3964 internal_error (__FILE__, __LINE__,
3965 "LWP %ld is suspended, suspended=%d\n",
3966 lwpid_of (thread), lwp->suspended);
3967 }
3968 gdb_assert (lwp->stopped);
3969
3970 /* Allow debugging the jump pad, gdb_collect, etc.. */
3971 return (supports_fast_tracepoints ()
3972 && agent_loaded_p ()
3973 && (gdb_breakpoint_here (lwp->stop_pc)
3974 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3975 || thread->last_resume_kind == resume_step)
3976 && (linux_fast_tracepoint_collecting (lwp, NULL)
3977 != fast_tpoint_collect_result::not_collecting));
3978 }
3979
3980 void
3981 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3982 {
3983 struct thread_info *saved_thread;
3984 struct lwp_info *lwp = get_thread_lwp (thread);
3985 int *wstat;
3986
3987 if (lwp->suspended != 0)
3988 {
3989 internal_error (__FILE__, __LINE__,
3990 "LWP %ld is suspended, suspended=%d\n",
3991 lwpid_of (thread), lwp->suspended);
3992 }
3993 gdb_assert (lwp->stopped);
3994
3995 /* For gdb_breakpoint_here. */
3996 saved_thread = current_thread;
3997 current_thread = thread;
3998
3999 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4000
4001 /* Allow debugging the jump pad, gdb_collect, etc. */
4002 if (!gdb_breakpoint_here (lwp->stop_pc)
4003 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4004 && thread->last_resume_kind != resume_step
4005 && maybe_move_out_of_jump_pad (lwp, wstat))
4006 {
4007 if (debug_threads)
4008 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4009 lwpid_of (thread));
4010
4011 if (wstat)
4012 {
4013 lwp->status_pending_p = 0;
4014 enqueue_one_deferred_signal (lwp, wstat);
4015
4016 if (debug_threads)
4017 debug_printf ("Signal %d for LWP %ld deferred "
4018 "(in jump pad)\n",
4019 WSTOPSIG (*wstat), lwpid_of (thread));
4020 }
4021
4022 resume_one_lwp (lwp, 0, 0, NULL);
4023 }
4024 else
4025 lwp_suspended_inc (lwp);
4026
4027 current_thread = saved_thread;
4028 }
4029
4030 static bool
4031 lwp_running (thread_info *thread)
4032 {
4033 struct lwp_info *lwp = get_thread_lwp (thread);
4034
4035 if (lwp_is_marked_dead (lwp))
4036 return false;
4037
4038 return !lwp->stopped;
4039 }
4040
4041 void
4042 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4043 {
4044 /* Should not be called recursively. */
4045 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4046
4047 if (debug_threads)
4048 {
4049 debug_enter ();
4050 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4051 suspend ? "stop-and-suspend" : "stop",
4052 except != NULL
4053 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4054 : "none");
4055 }
4056
4057 stopping_threads = (suspend
4058 ? STOPPING_AND_SUSPENDING_THREADS
4059 : STOPPING_THREADS);
4060
4061 if (suspend)
4062 for_each_thread ([&] (thread_info *thread)
4063 {
4064 suspend_and_send_sigstop (thread, except);
4065 });
4066 else
4067 for_each_thread ([&] (thread_info *thread)
4068 {
4069 send_sigstop (thread, except);
4070 });
4071
4072 wait_for_sigstop ();
4073 stopping_threads = NOT_STOPPING_THREADS;
4074
4075 if (debug_threads)
4076 {
4077 debug_printf ("stop_all_lwps done, setting stopping_threads "
4078 "back to !stopping\n");
4079 debug_exit ();
4080 }
4081 }
4082
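/* Usage note (inferred from the callers in this file): a plain
   stop_all_lwps (0, EXCEPT) is undone with unstop_all_lwps (0, EXCEPT),
   while the suspending variant stop_all_lwps (1, LWP) -- as used by
   start_step_over -- must be undone with unstop_all_lwps (1, LWP), so
   that the suspend counts bumped by suspend_and_send_sigstop are
   decremented again.  */
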
4083 /* Enqueue one signal in the chain of signals which need to be
4084 delivered to this process on next resume. */
4085
4086 static void
4087 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4088 {
4089 struct pending_signals *p_sig = XNEW (struct pending_signals);
4090
4091 p_sig->prev = lwp->pending_signals;
4092 p_sig->signal = signal;
4093 if (info == NULL)
4094 memset (&p_sig->info, 0, sizeof (siginfo_t));
4095 else
4096 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4097 lwp->pending_signals = p_sig;
4098 }
4099
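/* Note on ordering (sketch): PENDING_SIGNALS is a singly linked list
   with the newest entry at the head, chained through PREV.  Delivery
   is oldest-first; consumers walk to the tail, as resume_one_lwp_throw
   does below:

     struct pending_signals **p_sig = &lwp->pending_signals;

     while ((*p_sig)->prev != NULL)
       p_sig = &(*p_sig)->prev;
     // deliver (*p_sig)->signal, then free and unlink it
*/
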
4100 void
4101 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4102 {
4103 struct thread_info *thread = get_lwp_thread (lwp);
4104 struct regcache *regcache = get_thread_regcache (thread, 1);
4105
4106 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4107
4108 current_thread = thread;
4109 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4110
4111 for (CORE_ADDR pc : next_pcs)
4112 set_single_step_breakpoint (pc, current_ptid);
4113 }
4114
4115 int
4116 linux_process_target::single_step (lwp_info* lwp)
4117 {
4118 int step = 0;
4119
4120 if (can_hardware_single_step ())
4121 {
4122 step = 1;
4123 }
4124 else if (supports_software_single_step ())
4125 {
4126 install_software_single_step_breakpoints (lwp);
4127 step = 0;
4128 }
4129 else
4130 {
4131 if (debug_threads)
4132 debug_printf ("stepping is not implemented on this target\n");
4133 }
4134
4135 return step;
4136 }
4137
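/* The STEP flag returned above feeds the ptrace request selection in
   resume_one_lwp_throw: 1 maps to PTRACE_SINGLESTEP, while 0 maps to
   PTRACE_CONT (or PTRACE_SYSCALL when catching syscalls), with
   software stepping relying instead on the breakpoints installed by
   install_software_single_step_breakpoints.  */
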
4138 /* A signal can be delivered to the inferior only if we are not trying
4139 to finish a fast tracepoint collect. Since a signal can be delivered
4140 during a step-over, the program may enter the signal handler and trap
4141 again after returning from it. We can live with the spurious
4142 double traps. */
4143
4144 static int
4145 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4146 {
4147 return (lwp->collecting_fast_tracepoint
4148 == fast_tpoint_collect_result::not_collecting);
4149 }
4150
4151 void
4152 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4153 int signal, siginfo_t *info)
4154 {
4155 struct thread_info *thread = get_lwp_thread (lwp);
4156 struct thread_info *saved_thread;
4157 int ptrace_request;
4158 struct process_info *proc = get_thread_process (thread);
4159
4160 /* Note that the target description may not be initialised
4161 (proc->tdesc == NULL) at this point, because the program hasn't
4162 stopped at its first instruction yet. That means GDBserver is still
4163 skipping the extra traps from the wrapper program (see option
4164 --wrapper). Code in this function that requires register access
4165 should be guarded by a check such as proc->tdesc != NULL. */
4166
4167 if (lwp->stopped == 0)
4168 return;
4169
4170 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4171
4172 fast_tpoint_collect_result fast_tp_collecting
4173 = lwp->collecting_fast_tracepoint;
4174
4175 gdb_assert (!stabilizing_threads
4176 || (fast_tp_collecting
4177 != fast_tpoint_collect_result::not_collecting));
4178
4179 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4180 user used the "jump" command, or "set $pc = foo"). */
4181 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4182 {
4183 /* Collecting 'while-stepping' actions doesn't make sense
4184 anymore. */
4185 release_while_stepping_state_list (thread);
4186 }
4187
4188 /* If we have pending signals or status, and a new signal, enqueue the
4189 signal. Also enqueue the signal if it can't be delivered to the
4190 inferior right now. */
4191 if (signal != 0
4192 && (lwp->status_pending_p
4193 || lwp->pending_signals != NULL
4194 || !lwp_signal_can_be_delivered (lwp)))
4195 {
4196 enqueue_pending_signal (lwp, signal, info);
4197
4198 /* Postpone any pending signal. It was enqueued above. */
4199 signal = 0;
4200 }
4201
4202 if (lwp->status_pending_p)
4203 {
4204 if (debug_threads)
4205 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4206 " has pending status\n",
4207 lwpid_of (thread), step ? "step" : "continue",
4208 lwp->stop_expected ? "expected" : "not expected");
4209 return;
4210 }
4211
4212 saved_thread = current_thread;
4213 current_thread = thread;
4214
4215 /* This bit needs some thinking about. If we get a signal that
4216 we must report while a single-step reinsert is still pending,
4217 we often end up resuming the thread. It might be better to
4218 (ew) allow a stack of pending events; then we could be sure that
4219 the reinsert happened right away and not lose any signals.
4220
4221 Making this stack would also shrink the window in which breakpoints are
4222 uninserted (see comment in linux_wait_for_lwp) but not enough for
4223 complete correctness, so it won't solve that problem. It may be
4224 worthwhile just to solve this one, however. */
4225 if (lwp->bp_reinsert != 0)
4226 {
4227 if (debug_threads)
4228 debug_printf (" pending reinsert at 0x%s\n",
4229 paddress (lwp->bp_reinsert));
4230
4231 if (can_hardware_single_step ())
4232 {
4233 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4234 {
4235 if (step == 0)
4236 warning ("BAD - reinserting but not stepping.");
4237 if (lwp->suspended)
4238 warning ("BAD - reinserting and suspended(%d).",
4239 lwp->suspended);
4240 }
4241 }
4242
4243 step = maybe_hw_step (thread);
4244 }
4245
4246 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4247 {
4248 if (debug_threads)
4249 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4250 " (exit-jump-pad-bkpt)\n",
4251 lwpid_of (thread));
4252 }
4253 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4254 {
4255 if (debug_threads)
4256 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4257 " single-stepping\n",
4258 lwpid_of (thread));
4259
4260 if (can_hardware_single_step ())
4261 step = 1;
4262 else
4263 {
4264 internal_error (__FILE__, __LINE__,
4265 "moving out of jump pad single-stepping"
4266 " not implemented on this target");
4267 }
4268 }
4269
4270 /* If we have while-stepping actions in this thread, set it stepping.
4271 If we have a signal to deliver, it may or may not be set to
4272 SIG_IGN; we don't know. Assume it is, and allow collecting
4273 while-stepping into a signal handler. A possible smart thing to
4274 do would be to set an internal breakpoint at the signal return
4275 address, continue, and carry on catching this while-stepping
4276 action only when that breakpoint is hit. A future
4277 enhancement. */
4278 if (thread->while_stepping != NULL)
4279 {
4280 if (debug_threads)
4281 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4282 lwpid_of (thread));
4283
4284 step = single_step (lwp);
4285 }
4286
4287 if (proc->tdesc != NULL && low_supports_breakpoints ())
4288 {
4289 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4290
4291 lwp->stop_pc = low_get_pc (regcache);
4292
4293 if (debug_threads)
4294 {
4295 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4296 (long) lwp->stop_pc);
4297 }
4298 }
4299
4300 /* If we have pending signals, consume one if it can be delivered to
4301 the inferior. */
4302 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4303 {
4304 struct pending_signals **p_sig;
4305
4306 p_sig = &lwp->pending_signals;
4307 while ((*p_sig)->prev != NULL)
4308 p_sig = &(*p_sig)->prev;
4309
4310 signal = (*p_sig)->signal;
4311 if ((*p_sig)->info.si_signo != 0)
4312 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4313 &(*p_sig)->info);
4314
4315 free (*p_sig);
4316 *p_sig = NULL;
4317 }
4318
4319 if (debug_threads)
4320 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4321 lwpid_of (thread), step ? "step" : "continue", signal,
4322 lwp->stop_expected ? "expected" : "not expected");
4323
4324 low_prepare_to_resume (lwp);
4325
4326 regcache_invalidate_thread (thread);
4327 errno = 0;
4328 lwp->stepping = step;
4329 if (step)
4330 ptrace_request = PTRACE_SINGLESTEP;
4331 else if (gdb_catching_syscalls_p (lwp))
4332 ptrace_request = PTRACE_SYSCALL;
4333 else
4334 ptrace_request = PTRACE_CONT;
4335 ptrace (ptrace_request,
4336 lwpid_of (thread),
4337 (PTRACE_TYPE_ARG3) 0,
4338 /* Coerce to a uintptr_t first to avoid potential gcc warning
4339 of coercing an 8 byte integer to a 4 byte pointer. */
4340 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4341
4342 current_thread = saved_thread;
4343 if (errno)
4344 perror_with_name ("resuming thread");
4345
4346 /* Successfully resumed. Clear state that no longer makes sense,
4347 and mark the LWP as running. Must not do this before resuming
4348 otherwise if that fails other code will be confused. E.g., we'd
4349 later try to stop the LWP and hang forever waiting for a stop
4350 status. Note that we must not throw after this is cleared,
4351 otherwise check_ptrace_stopped_lwp_gone would get confused. */
4352 lwp->stopped = 0;
4353 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4354 }
4355
4356 void
4357 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4358 {
4359 /* Nop. */
4360 }
4361
4362 /* Called when we try to resume a stopped LWP and that errors out. If
4363 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4364 or about to become one), discard the error, clear any pending status
4365 the LWP may have, and return true (we'll collect the exit status
4366 soon enough). Otherwise, return false. */
4367
4368 static int
4369 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4370 {
4371 struct thread_info *thread = get_lwp_thread (lp);
4372
4373 /* If we get an error after resuming the LWP successfully, we'd
4374 confuse !T state for the LWP being gone. */
4375 gdb_assert (lp->stopped);
4376
4377 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4378 because even if ptrace failed with ESRCH, the tracee may be "not
4379 yet fully dead", but already refusing ptrace requests. In that
4380 case the tracee has 'R (Running)' state for a little bit
4381 (observed in Linux 3.18). See also the note on ESRCH in the
4382 ptrace(2) man page. Instead, check whether the LWP has any state
4383 other than ptrace-stopped. */
4384
4385 /* Don't assume anything if /proc/PID/status can't be read. */
4386 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4387 {
4388 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4389 lp->status_pending_p = 0;
4390 return 1;
4391 }
4392 return 0;
4393 }
4394
4395 void
4396 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4397 siginfo_t *info)
4398 {
4399 try
4400 {
4401 resume_one_lwp_throw (lwp, step, signal, info);
4402 }
4403 catch (const gdb_exception_error &ex)
4404 {
4405 if (!check_ptrace_stopped_lwp_gone (lwp))
4406 throw;
4407 }
4408 }
4409
4410 /* This function is called once per thread via for_each_thread.
4411 We look up which resume request applies to THREAD and mark it with a
4412 pointer to the appropriate resume request.
4413
4414 This algorithm is O(threads * resume elements), but the number of
4415 resume elements is small (and will remain small at least until GDB
4416 supports thread suspension). */
4417
4418 static void
4419 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4420 {
4421 struct lwp_info *lwp = get_thread_lwp (thread);
4422
4423 for (int ndx = 0; ndx < n; ndx++)
4424 {
4425 ptid_t ptid = resume[ndx].thread;
4426 if (ptid == minus_one_ptid
4427 || ptid == thread->id
4428 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4429 of PID'. */
4430 || (ptid.pid () == pid_of (thread)
4431 && (ptid.is_pid ()
4432 || ptid.lwp () == -1)))
4433 {
4434 if (resume[ndx].kind == resume_stop
4435 && thread->last_resume_kind == resume_stop)
4436 {
4437 if (debug_threads)
4438 debug_printf ("already %s LWP %ld at GDB's request\n",
4439 (thread->last_status.kind
4440 == TARGET_WAITKIND_STOPPED)
4441 ? "stopped"
4442 : "stopping",
4443 lwpid_of (thread));
4444
4445 continue;
4446 }
4447
4448 /* Ignore (wildcard) resume requests for already-resumed
4449 threads. */
4450 if (resume[ndx].kind != resume_stop
4451 && thread->last_resume_kind != resume_stop)
4452 {
4453 if (debug_threads)
4454 debug_printf ("already %s LWP %ld at GDB's request\n",
4455 (thread->last_resume_kind
4456 == resume_step)
4457 ? "stepping"
4458 : "continuing",
4459 lwpid_of (thread));
4460 continue;
4461 }
4462
4463 /* Don't let wildcard resumes resume fork children that GDB
4464 does not yet know are new fork children. */
4465 if (lwp->fork_relative != NULL)
4466 {
4467 struct lwp_info *rel = lwp->fork_relative;
4468
4469 if (rel->status_pending_p
4470 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4471 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4472 {
4473 if (debug_threads)
4474 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4475 lwpid_of (thread));
4476 continue;
4477 }
4478 }
4479
4480 /* If the thread has a pending event that has already been
4481 reported to GDBserver core, but GDB has not pulled the
4482 event out of the vStopped queue yet, likewise, ignore the
4483 (wildcard) resume request. */
4484 if (in_queued_stop_replies (thread->id))
4485 {
4486 if (debug_threads)
4487 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4488 lwpid_of (thread));
4489 continue;
4490 }
4491
4492 lwp->resume = &resume[ndx];
4493 thread->last_resume_kind = lwp->resume->kind;
4494
4495 lwp->step_range_start = lwp->resume->step_range_start;
4496 lwp->step_range_end = lwp->resume->step_range_end;
4497
4498 /* If we had a deferred signal to report, dequeue one now.
4499 This can happen if LWP gets more than one signal while
4500 trying to get out of a jump pad. */
4501 if (lwp->stopped
4502 && !lwp->status_pending_p
4503 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4504 {
4505 lwp->status_pending_p = 1;
4506
4507 if (debug_threads)
4508 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4509 "leaving status pending.\n",
4510 WSTOPSIG (lwp->status_pending),
4511 lwpid_of (thread));
4512 }
4513
4514 return;
4515 }
4516 }
4517
4518 /* No resume action for this thread. */
4519 lwp->resume = NULL;
4520 }
4521
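/* Matching examples for the loop above (a sketch; the ptid_t
   constructors shown assume the usual (pid, lwp, tid) interface):

     resume[ndx].thread == minus_one_ptid       // every thread
     resume[ndx].thread == ptid_t (1234)        // 'p1234': all of PID 1234
     resume[ndx].thread == ptid_t (1234, -1, 0) // 'p1234.-1': same
     resume[ndx].thread == ptid_t (1234, 42, 0) // exactly LWP 42 of 1234

   The first three are the "wildcard" requests that the checks above
   refuse to apply to unreported fork children and queued stops.  */
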
4522 bool
4523 linux_process_target::resume_status_pending (thread_info *thread)
4524 {
4525 struct lwp_info *lwp = get_thread_lwp (thread);
4526
4527 /* LWPs which will not be resumed are not interesting, because
4528 we might not wait for them next time through linux_wait. */
4529 if (lwp->resume == NULL)
4530 return false;
4531
4532 return thread_still_has_status_pending (thread);
4533 }
4534
4535 bool
4536 linux_process_target::thread_needs_step_over (thread_info *thread)
4537 {
4538 struct lwp_info *lwp = get_thread_lwp (thread);
4539 struct thread_info *saved_thread;
4540 CORE_ADDR pc;
4541 struct process_info *proc = get_thread_process (thread);
4542
4543 /* GDBserver is skipping the extra traps from the wrapper program,
4544 so there is no need to do a step-over. */
4545 if (proc->tdesc == NULL)
4546 return false;
4547
4548 /* LWPs which will not be resumed are not interesting, because we
4549 might not wait for them next time through linux_wait. */
4550
4551 if (!lwp->stopped)
4552 {
4553 if (debug_threads)
4554 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4555 lwpid_of (thread));
4556 return false;
4557 }
4558
4559 if (thread->last_resume_kind == resume_stop)
4560 {
4561 if (debug_threads)
4562 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4563 " stopped\n",
4564 lwpid_of (thread));
4565 return false;
4566 }
4567
4568 gdb_assert (lwp->suspended >= 0);
4569
4570 if (lwp->suspended)
4571 {
4572 if (debug_threads)
4573 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4574 lwpid_of (thread));
4575 return false;
4576 }
4577
4578 if (lwp->status_pending_p)
4579 {
4580 if (debug_threads)
4581 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4582 " status.\n",
4583 lwpid_of (thread));
4584 return false;
4585 }
4586
4587 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4588 or we have. */
4589 pc = get_pc (lwp);
4590
4591 /* If the PC has changed since we stopped, then don't do anything,
4592 and let the breakpoint/tracepoint be hit. This happens if, for
4593 instance, GDB handled the decr_pc_after_break subtraction itself,
4594 GDB is OOL stepping this thread, or the user has issued a "jump"
4595 command or poked the thread's registers herself. */
4596 if (pc != lwp->stop_pc)
4597 {
4598 if (debug_threads)
4599 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4600 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4601 lwpid_of (thread),
4602 paddress (lwp->stop_pc), paddress (pc));
4603 return false;
4604 }
4605
4606 /* On a software single-step target, resume the inferior with the
4607 signal rather than stepping over. */
4608 if (supports_software_single_step ()
4609 && lwp->pending_signals != NULL
4610 && lwp_signal_can_be_delivered (lwp))
4611 {
4612 if (debug_threads)
4613 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4614 " signals.\n",
4615 lwpid_of (thread));
4616
4617 return false;
4618 }
4619
4620 saved_thread = current_thread;
4621 current_thread = thread;
4622
4623 /* We can only step over breakpoints we know about. */
4624 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4625 {
4626 /* Don't step over a breakpoint that GDB expects to hit
4627 though. If the condition is being evaluated on the target's side
4628 and it evaluates to false, step over this breakpoint as well. */
4629 if (gdb_breakpoint_here (pc)
4630 && gdb_condition_true_at_breakpoint (pc)
4631 && gdb_no_commands_at_breakpoint (pc))
4632 {
4633 if (debug_threads)
4634 debug_printf ("Need step over [LWP %ld]? yes, but found"
4635 " GDB breakpoint at 0x%s; skipping step over\n",
4636 lwpid_of (thread), paddress (pc));
4637
4638 current_thread = saved_thread;
4639 return false;
4640 }
4641 else
4642 {
4643 if (debug_threads)
4644 debug_printf ("Need step over [LWP %ld]? yes, "
4645 "found breakpoint at 0x%s\n",
4646 lwpid_of (thread), paddress (pc));
4647
4648 /* We've found an lwp that needs stepping over --- return true so
4649 that find_thread stops looking. */
4650 current_thread = saved_thread;
4651
4652 return true;
4653 }
4654 }
4655
4656 current_thread = saved_thread;
4657
4658 if (debug_threads)
4659 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4660 " at 0x%s\n",
4661 lwpid_of (thread), paddress (pc));
4662
4663 return false;
4664 }
4665
4666 void
4667 linux_process_target::start_step_over (lwp_info *lwp)
4668 {
4669 struct thread_info *thread = get_lwp_thread (lwp);
4670 struct thread_info *saved_thread;
4671 CORE_ADDR pc;
4672 int step;
4673
4674 if (debug_threads)
4675 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4676 lwpid_of (thread));
4677
4678 stop_all_lwps (1, lwp);
4679
4680 if (lwp->suspended != 0)
4681 {
4682 internal_error (__FILE__, __LINE__,
4683 "LWP %ld suspended=%d\n", lwpid_of (thread),
4684 lwp->suspended);
4685 }
4686
4687 if (debug_threads)
4688 debug_printf ("Done stopping all threads for step-over.\n");
4689
4690 /* Note, we should always reach here with an already adjusted PC,
4691 either by GDB (if we're resuming due to GDB's request), or by our
4692 caller, if we just finished handling an internal breakpoint GDB
4693 shouldn't care about. */
4694 pc = get_pc (lwp);
4695
4696 saved_thread = current_thread;
4697 current_thread = thread;
4698
4699 lwp->bp_reinsert = pc;
4700 uninsert_breakpoints_at (pc);
4701 uninsert_fast_tracepoint_jumps_at (pc);
4702
4703 step = single_step (lwp);
4704
4705 current_thread = saved_thread;
4706
4707 resume_one_lwp (lwp, step, 0, NULL);
4708
4709 /* Require next event from this LWP. */
4710 step_over_bkpt = thread->id;
4711 }
4712
4713 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4714 start_step_over, if still there, and delete any single-step
4715 breakpoints we've set on non-hardware single-step targets. */
4716
4717 static int
4718 finish_step_over (struct lwp_info *lwp)
4719 {
4720 if (lwp->bp_reinsert != 0)
4721 {
4722 struct thread_info *saved_thread = current_thread;
4723
4724 if (debug_threads)
4725 debug_printf ("Finished step over.\n");
4726
4727 current_thread = get_lwp_thread (lwp);
4728
4729 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4730 may be no breakpoint to reinsert there by now. */
4731 reinsert_breakpoints_at (lwp->bp_reinsert);
4732 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4733
4734 lwp->bp_reinsert = 0;
4735
4736 /* Delete any single-step breakpoints. No longer needed. We
4737 don't have to worry about other threads hitting this trap,
4738 and later not being able to explain it, because we were
4739 stepping over a breakpoint, and we hold all threads but
4740 LWP stopped while doing that. */
4741 if (!can_hardware_single_step ())
4742 {
4743 gdb_assert (has_single_step_breakpoints (current_thread));
4744 delete_single_step_breakpoints (current_thread);
4745 }
4746
4747 step_over_bkpt = null_ptid;
4748 current_thread = saved_thread;
4749 return 1;
4750 }
4751 else
4752 return 0;
4753 }
4754
4755 void
4756 linux_process_target::complete_ongoing_step_over ()
4757 {
4758 if (step_over_bkpt != null_ptid)
4759 {
4760 struct lwp_info *lwp;
4761 int wstat;
4762 int ret;
4763
4764 if (debug_threads)
4765 debug_printf ("detach: step over in progress, finish it first\n");
4766
4767 /* Passing NULL_PTID as filter indicates we want all events to
4768 be left pending. Eventually this returns when there are no
4769 unwaited-for children left. */
4770 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4771 __WALL);
4772 gdb_assert (ret == -1);
4773
4774 lwp = find_lwp_pid (step_over_bkpt);
4775 if (lwp != NULL)
4776 finish_step_over (lwp);
4777 step_over_bkpt = null_ptid;
4778 unsuspend_all_lwps (lwp);
4779 }
4780 }
4781
4782 void
4783 linux_process_target::resume_one_thread (thread_info *thread,
4784 bool leave_all_stopped)
4785 {
4786 struct lwp_info *lwp = get_thread_lwp (thread);
4787 int leave_pending;
4788
4789 if (lwp->resume == NULL)
4790 return;
4791
4792 if (lwp->resume->kind == resume_stop)
4793 {
4794 if (debug_threads)
4795 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4796
4797 if (!lwp->stopped)
4798 {
4799 if (debug_threads)
4800 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4801
4802 /* Stop the thread, and wait for the event asynchronously,
4803 through the event loop. */
4804 send_sigstop (lwp);
4805 }
4806 else
4807 {
4808 if (debug_threads)
4809 debug_printf ("already stopped LWP %ld\n",
4810 lwpid_of (thread));
4811
4812 /* The LWP may have been stopped in an internal event that
4813 was not meant to be notified back to GDB (e.g., gdbserver
4814 breakpoint), so we should be reporting a stop event in
4815 this case too. */
4816
4817 /* If the thread already has a pending SIGSTOP, this is a
4818 no-op. Otherwise, something later will presumably resume
4819 the thread and this will cause it to cancel any pending
4820 operation, due to last_resume_kind == resume_stop. If
4821 the thread already has a pending status to report, we
4822 will still report it the next time we wait - see
4823 status_pending_p_callback. */
4824
4825 /* If we already have a pending signal to report, then
4826 there's no need to queue a SIGSTOP, as this means we're
4827 midway through moving the LWP out of the jumppad, and we
4828 will report the pending signal as soon as that is
4829 finished. */
4830 if (lwp->pending_signals_to_report == NULL)
4831 send_sigstop (lwp);
4832 }
4833
4834 /* For stop requests, we're done. */
4835 lwp->resume = NULL;
4836 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4837 return;
4838 }
4839
4840 /* If this thread which is about to be resumed has a pending status,
4841 then don't resume it - we can just report the pending status.
4842 Likewise if it is suspended, because e.g., another thread is
4843 stepping past a breakpoint. Make sure to queue any signals that
4844 would otherwise be sent. In all-stop mode, we make this decision
4845 based on whether *any* thread has a pending status. If there's a
4846 thread that needs the step-over-breakpoint dance, then don't
4847 resume any other thread but that particular one. */
4848 leave_pending = (lwp->suspended
4849 || lwp->status_pending_p
4850 || leave_all_stopped);
4851
4852 /* If we have a new signal, enqueue the signal. */
4853 if (lwp->resume->sig != 0)
4854 {
4855 siginfo_t info, *info_p;
4856
4857 /* If this is the same signal we were previously stopped by,
4858 make sure to queue its siginfo. */
4859 if (WIFSTOPPED (lwp->last_status)
4860 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4861 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4862 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4863 info_p = &info;
4864 else
4865 info_p = NULL;
4866
4867 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4868 }
4869
4870 if (!leave_pending)
4871 {
4872 if (debug_threads)
4873 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4874
4875 proceed_one_lwp (thread, NULL);
4876 }
4877 else
4878 {
4879 if (debug_threads)
4880 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4881 }
4882
4883 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4884 lwp->resume = NULL;
4885 }
4886
4887 void
4888 linux_process_target::resume (thread_resume *resume_info, size_t n)
4889 {
4890 struct thread_info *need_step_over = NULL;
4891
4892 if (debug_threads)
4893 {
4894 debug_enter ();
4895 debug_printf ("linux_resume:\n");
4896 }
4897
4898 for_each_thread ([&] (thread_info *thread)
4899 {
4900 linux_set_resume_request (thread, resume_info, n);
4901 });
4902
4903 /* If there is a thread which would otherwise be resumed, which has
4904 a pending status, then don't resume any threads - we can just
4905 report the pending status. Make sure to queue any signals that
4906 would otherwise be sent. In non-stop mode, we'll apply this
4907 logic to each thread individually. We consume all pending events
4908 before considering to start a step-over (in all-stop). */
4909 bool any_pending = false;
4910 if (!non_stop)
4911 any_pending = find_thread ([this] (thread_info *thread)
4912 {
4913 return resume_status_pending (thread);
4914 }) != nullptr;
4915
4916 /* If there is a thread which would otherwise be resumed, which is
4917 stopped at a breakpoint that needs stepping over, then don't
4918 resume any threads - have it step over the breakpoint with all
4919 other threads stopped, then resume all threads again. Make sure
4920 to queue any signals that would otherwise be delivered or
4921 queued. */
4922 if (!any_pending && low_supports_breakpoints ())
4923 need_step_over = find_thread ([this] (thread_info *thread)
4924 {
4925 return thread_needs_step_over (thread);
4926 });
4927
4928 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4929
4930 if (debug_threads)
4931 {
4932 if (need_step_over != NULL)
4933 debug_printf ("Not resuming all, need step over\n");
4934 else if (any_pending)
4935 debug_printf ("Not resuming, all-stop and found "
4936 "an LWP with pending status\n");
4937 else
4938 debug_printf ("Resuming, no pending status or step over needed\n");
4939 }
4940
4941 /* Even if we're leaving threads stopped, queue all signals we'd
4942 otherwise deliver. */
4943 for_each_thread ([&] (thread_info *thread)
4944 {
4945 resume_one_thread (thread, leave_all_stopped);
4946 });
4947
4948 if (need_step_over)
4949 start_step_over (get_thread_lwp (need_step_over));
4950
4951 if (debug_threads)
4952 {
4953 debug_printf ("linux_resume done\n");
4954 debug_exit ();
4955 }
4956
4957 /* We may have events that were pending that can/should be sent to
4958 the client now. Trigger a linux_wait call. */
4959 if (target_is_async_p ())
4960 async_file_mark ();
4961 }
4962
4963 void
4964 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4965 {
4966 struct lwp_info *lwp = get_thread_lwp (thread);
4967 int step;
4968
4969 if (lwp == except)
4970 return;
4971
4972 if (debug_threads)
4973 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4974
4975 if (!lwp->stopped)
4976 {
4977 if (debug_threads)
4978 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4979 return;
4980 }
4981
4982 if (thread->last_resume_kind == resume_stop
4983 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4984 {
4985 if (debug_threads)
4986 debug_printf (" client wants LWP %ld to remain stopped\n",
4987 lwpid_of (thread));
4988 return;
4989 }
4990
4991 if (lwp->status_pending_p)
4992 {
4993 if (debug_threads)
4994 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4995 lwpid_of (thread));
4996 return;
4997 }
4998
4999 gdb_assert (lwp->suspended >= 0);
5000
5001 if (lwp->suspended)
5002 {
5003 if (debug_threads)
5004 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5005 return;
5006 }
5007
5008 if (thread->last_resume_kind == resume_stop
5009 && lwp->pending_signals_to_report == NULL
5010 && (lwp->collecting_fast_tracepoint
5011 == fast_tpoint_collect_result::not_collecting))
5012 {
5013 /* We haven't reported this LWP as stopped yet (otherwise, the
5014 last_status.kind check above would catch it, and we wouldn't
5015 reach here). This LWP may have been momentarily paused by a
5016 stop_all_lwps call while handling, for example, another LWP's
5017 step-over. In that case, the pending expected SIGSTOP signal
5018 that was queued at vCont;t handling time will have already
5019 been consumed by wait_for_sigstop, and so we need to requeue
5020 another one here. Note that if the LWP already has a SIGSTOP
5021 pending, this is a no-op. */
5022
5023 if (debug_threads)
5024 debug_printf ("Client wants LWP %ld to stop. "
5025 "Making sure it has a SIGSTOP pending\n",
5026 lwpid_of (thread));
5027
5028 send_sigstop (lwp);
5029 }
5030
5031 if (thread->last_resume_kind == resume_step)
5032 {
5033 if (debug_threads)
5034 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5035 lwpid_of (thread));
5036
5037 /* If resume_step is requested by GDB, install single-step
5038 breakpoints when the thread is about to be actually resumed if
5039 the single-step breakpoints weren't removed. */
5040 if (supports_software_single_step ()
5041 && !has_single_step_breakpoints (thread))
5042 install_software_single_step_breakpoints (lwp);
5043
5044 step = maybe_hw_step (thread);
5045 }
5046 else if (lwp->bp_reinsert != 0)
5047 {
5048 if (debug_threads)
5049 debug_printf (" stepping LWP %ld, reinsert set\n",
5050 lwpid_of (thread));
5051
5052 step = maybe_hw_step (thread);
5053 }
5054 else
5055 step = 0;
5056
5057 resume_one_lwp (lwp, step, 0, NULL);
5058 }
5059
5060 void
5061 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5062 lwp_info *except)
5063 {
5064 struct lwp_info *lwp = get_thread_lwp (thread);
5065
5066 if (lwp == except)
5067 return;
5068
5069 lwp_suspended_decr (lwp);
5070
5071 proceed_one_lwp (thread, except);
5072 }
5073
5074 void
5075 linux_process_target::proceed_all_lwps ()
5076 {
5077 struct thread_info *need_step_over;
5078
5079 /* If there is a thread which would otherwise be resumed, which is
5080 stopped at a breakpoint that needs stepping over, then don't
5081 resume any threads - have it step over the breakpoint with all
5082 other threads stopped, then resume all threads again. */
5083
5084 if (low_supports_breakpoints ())
5085 {
5086 need_step_over = find_thread ([this] (thread_info *thread)
5087 {
5088 return thread_needs_step_over (thread);
5089 });
5090
5091 if (need_step_over != NULL)
5092 {
5093 if (debug_threads)
5094 debug_printf ("proceed_all_lwps: found "
5095 "thread %ld needing a step-over\n",
5096 lwpid_of (need_step_over));
5097
5098 start_step_over (get_thread_lwp (need_step_over));
5099 return;
5100 }
5101 }
5102
5103 if (debug_threads)
5104 debug_printf ("Proceeding, no step-over needed\n");
5105
5106 for_each_thread ([this] (thread_info *thread)
5107 {
5108 proceed_one_lwp (thread, NULL);
5109 });
5110 }
5111
5112 void
5113 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5114 {
5115 if (debug_threads)
5116 {
5117 debug_enter ();
5118 if (except)
5119 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5120 lwpid_of (get_lwp_thread (except)));
5121 else
5122 debug_printf ("unstopping all lwps\n");
5123 }
5124
5125 if (unsuspend)
5126 for_each_thread ([&] (thread_info *thread)
5127 {
5128 unsuspend_and_proceed_one_lwp (thread, except);
5129 });
5130 else
5131 for_each_thread ([&] (thread_info *thread)
5132 {
5133 proceed_one_lwp (thread, except);
5134 });
5135
5136 if (debug_threads)
5137 {
5138 debug_printf ("unstop_all_lwps done\n");
5139 debug_exit ();
5140 }
5141 }
5142
5143
5144 #ifdef HAVE_LINUX_REGSETS
5145
5146 #define use_linux_regsets 1
5147
5148 /* Returns true if REGSET has been disabled. */
5149
5150 static int
5151 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5152 {
5153 return (info->disabled_regsets != NULL
5154 && info->disabled_regsets[regset - info->regsets]);
5155 }
5156
5157 /* Disable REGSET. */
5158
5159 static void
5160 disable_regset (struct regsets_info *info, struct regset_info *regset)
5161 {
5162 int dr_offset;
5163
5164 dr_offset = regset - info->regsets;
5165 if (info->disabled_regsets == NULL)
5166 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5167 info->disabled_regsets[dr_offset] = 1;
5168 }
5169
5170 static int
5171 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5172 struct regcache *regcache)
5173 {
5174 struct regset_info *regset;
5175 int saw_general_regs = 0;
5176 int pid;
5177 struct iovec iov;
5178
5179 pid = lwpid_of (current_thread);
5180 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5181 {
5182 void *buf, *data;
5183 int nt_type, res;
5184
5185 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5186 continue;
5187
5188 buf = xmalloc (regset->size);
5189
5190 nt_type = regset->nt_type;
5191 if (nt_type)
5192 {
5193 iov.iov_base = buf;
5194 iov.iov_len = regset->size;
5195 data = (void *) &iov;
5196 }
5197 else
5198 data = buf;
5199
5200 #ifndef __sparc__
5201 res = ptrace (regset->get_request, pid,
5202 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5203 #else
5204 res = ptrace (regset->get_request, pid, data, nt_type);
5205 #endif
5206 if (res < 0)
5207 {
5208 if (errno == EIO
5209 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5210 {
5211 /* If we get EIO on a regset, or an EINVAL and the regset is
5212 optional, do not try it again for this process mode. */
5213 disable_regset (regsets_info, regset);
5214 }
5215 else if (errno == ENODATA)
5216 {
5217 /* ENODATA may be returned if the regset is currently
5218 not "active". This can happen in normal operation,
5219 so suppress the warning in this case. */
5220 }
5221 else if (errno == ESRCH)
5222 {
5223 /* At this point, ESRCH should mean the process is
5224 already gone, in which case we simply ignore attempts
5225 to read its registers. */
5226 }
5227 else
5228 {
5229 char s[256];
5230 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5231 pid);
5232 perror (s);
5233 }
5234 }
5235 else
5236 {
5237 if (regset->type == GENERAL_REGS)
5238 saw_general_regs = 1;
5239 regset->store_function (regcache, buf);
5240 }
5241 free (buf);
5242 }
5243 if (saw_general_regs)
5244 return 0;
5245 else
5246 return 1;
5247 }
5248
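/* For reference, the iovec branch above is the modern PTRACE_GETREGSET
   interface.  A minimal standalone sketch (assuming <sys/ptrace.h>,
   <sys/uio.h>, <sys/procfs.h> and <elf.h> for NT_PRSTATUS and
   elf_gregset_t):

     elf_gregset_t regs;
     struct iovec iov = { &regs, sizeof (regs) };

     if (ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
       ;  // iov.iov_len now holds how much the kernel actually wrote

   The non-iovec branch uses the older fixed-layout requests such as
   PTRACE_GETREGS, where the buffer is passed directly.  */
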
5249 static int
5250 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5251 struct regcache *regcache)
5252 {
5253 struct regset_info *regset;
5254 int saw_general_regs = 0;
5255 int pid;
5256 struct iovec iov;
5257
5258 pid = lwpid_of (current_thread);
5259 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5260 {
5261 void *buf, *data;
5262 int nt_type, res;
5263
5264 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5265 || regset->fill_function == NULL)
5266 continue;
5267
5268 buf = xmalloc (regset->size);
5269
5270 /* First fill the buffer with the current register set contents,
5271 in case there are any items in the kernel's regset that are
5272 not in gdbserver's regcache. */
5273
5274 nt_type = regset->nt_type;
5275 if (nt_type)
5276 {
5277 iov.iov_base = buf;
5278 iov.iov_len = regset->size;
5279 data = (void *) &iov;
5280 }
5281 else
5282 data = buf;
5283
5284 #ifndef __sparc__
5285 res = ptrace (regset->get_request, pid,
5286 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5287 #else
5288 res = ptrace (regset->get_request, pid, data, nt_type);
5289 #endif
5290
5291 if (res == 0)
5292 {
5293 /* Then overlay our cached registers on that. */
5294 regset->fill_function (regcache, buf);
5295
5296 /* Only now do we write the register set. */
5297 #ifndef __sparc__
5298 res = ptrace (regset->set_request, pid,
5299 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5300 #else
5301 res = ptrace (regset->set_request, pid, data, nt_type);
5302 #endif
5303 }
5304
5305 if (res < 0)
5306 {
5307 if (errno == EIO
5308 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5309 {
5310 /* If we get EIO on a regset, or an EINVAL and the regset is
5311 optional, do not try it again for this process mode. */
5312 disable_regset (regsets_info, regset);
5313 }
5314 else if (errno == ESRCH)
5315 {
5316 /* At this point, ESRCH should mean the process is
5317 already gone, in which case we simply ignore attempts
5318 to change its registers. See also the related
5319 comment in resume_one_lwp. */
5320 free (buf);
5321 return 0;
5322 }
5323 else
5324 {
5325 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5326 }
5327 }
5328 else if (regset->type == GENERAL_REGS)
5329 saw_general_regs = 1;
5330 free (buf);
5331 }
5332 if (saw_general_regs)
5333 return 0;
5334 else
5335 return 1;
5336 }
5337
5338 #else /* !HAVE_LINUX_REGSETS */
5339
5340 #define use_linux_regsets 0
5341 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5342 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5343
5344 #endif
5345
5346 /* Return 1 if register REGNO is supported by one of the regset ptrace
5347 calls or 0 if it has to be transferred individually. */
5348
5349 static int
5350 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5351 {
5352 unsigned char mask = 1 << (regno % 8);
5353 size_t index = regno / 8;
5354
5355 return (use_linux_regsets
5356 && (regs_info->regset_bitmap == NULL
5357 || (regs_info->regset_bitmap[index] & mask) != 0));
5358 }
5359
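/* Worked example for the bitmap test above: with REGNO == 10,
   INDEX == 10 / 8 == 1 and MASK == 1 << (10 % 8) == 0x04, so register
   10 is handled by the regsets iff bit 2 of regset_bitmap[1] is set
   (a NULL bitmap means every register is).  */
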
5360 #ifdef HAVE_LINUX_USRREGS
5361
5362 static int
5363 register_addr (const struct usrregs_info *usrregs, int regnum)
5364 {
5365 int addr;
5366
5367 if (regnum < 0 || regnum >= usrregs->num_regs)
5368 error ("Invalid register number %d.", regnum);
5369
5370 addr = usrregs->regmap[regnum];
5371
5372 return addr;
5373 }
5374
5375
5376 void
5377 linux_process_target::fetch_register (const usrregs_info *usrregs,
5378 regcache *regcache, int regno)
5379 {
5380 CORE_ADDR regaddr;
5381 int i, size;
5382 char *buf;
5383 int pid;
5384
5385 if (regno >= usrregs->num_regs)
5386 return;
5387 if (low_cannot_fetch_register (regno))
5388 return;
5389
5390 regaddr = register_addr (usrregs, regno);
5391 if (regaddr == -1)
5392 return;
5393
5394 size = ((register_size (regcache->tdesc, regno)
5395 + sizeof (PTRACE_XFER_TYPE) - 1)
5396 & -sizeof (PTRACE_XFER_TYPE));
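/* E.g., with an 8-byte PTRACE_XFER_TYPE, a 10-byte register rounds
   up to 16 bytes: (10 + 7) & -8 == 16.  */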
5397 buf = (char *) alloca (size);
5398
5399 pid = lwpid_of (current_thread);
5400 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5401 {
5402 errno = 0;
5403 *(PTRACE_XFER_TYPE *) (buf + i) =
5404 ptrace (PTRACE_PEEKUSER, pid,
5405 /* Coerce to a uintptr_t first to avoid potential gcc warning
5406 of coercing an 8 byte integer to a 4 byte pointer. */
5407 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5408 regaddr += sizeof (PTRACE_XFER_TYPE);
5409 if (errno != 0)
5410 {
5411 /* Mark register REGNO unavailable. */
5412 supply_register (regcache, regno, NULL);
5413 return;
5414 }
5415 }
5416
5417 low_supply_ptrace_register (regcache, regno, buf);
5418 }
5419
5420 void
5421 linux_process_target::store_register (const usrregs_info *usrregs,
5422 regcache *regcache, int regno)
5423 {
5424 CORE_ADDR regaddr;
5425 int i, size;
5426 char *buf;
5427 int pid;
5428
5429 if (regno >= usrregs->num_regs)
5430 return;
5431 if (low_cannot_store_register (regno))
5432 return;
5433
5434 regaddr = register_addr (usrregs, regno);
5435 if (regaddr == -1)
5436 return;
5437
5438 size = ((register_size (regcache->tdesc, regno)
5439 + sizeof (PTRACE_XFER_TYPE) - 1)
5440 & -sizeof (PTRACE_XFER_TYPE));
5441 buf = (char *) alloca (size);
5442 memset (buf, 0, size);
5443
5444 low_collect_ptrace_register (regcache, regno, buf);
5445
5446 pid = lwpid_of (current_thread);
5447 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5448 {
5449 errno = 0;
5450 ptrace (PTRACE_POKEUSER, pid,
5451 /* Coerce to a uintptr_t first to avoid potential gcc warning
5452 about coercing an 8 byte integer to a 4 byte pointer. */
5453 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5454 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5455 if (errno != 0)
5456 {
5457 /* At this point, ESRCH should mean the process is
5458 already gone, in which case we simply ignore attempts
5459 to change its registers. See also the related
5460 comment in resume_one_lwp. */
5461 if (errno == ESRCH)
5462 return;
5463
5465 if (!low_cannot_store_register (regno))
5466 error ("writing register %d: %s", regno, safe_strerror (errno));
5467 }
5468 regaddr += sizeof (PTRACE_XFER_TYPE);
5469 }
5470 }
5471 #endif /* HAVE_LINUX_USRREGS */
5472
5473 void
5474 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5475 int regno, char *buf)
5476 {
5477 collect_register (regcache, regno, buf);
5478 }
5479
5480 void
5481 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5482 int regno, const char *buf)
5483 {
5484 supply_register (regcache, regno, buf);
5485 }
5486
5487 void
5488 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5489 regcache *regcache,
5490 int regno, int all)
5491 {
5492 #ifdef HAVE_LINUX_USRREGS
5493 struct usrregs_info *usr = regs_info->usrregs;
5494
5495 if (regno == -1)
5496 {
5497 for (regno = 0; regno < usr->num_regs; regno++)
5498 if (all || !linux_register_in_regsets (regs_info, regno))
5499 fetch_register (usr, regcache, regno);
5500 }
5501 else
5502 fetch_register (usr, regcache, regno);
5503 #endif
5504 }
5505
5506 void
5507 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5508 regcache *regcache,
5509 int regno, int all)
5510 {
5511 #ifdef HAVE_LINUX_USRREGS
5512 struct usrregs_info *usr = regs_info->usrregs;
5513
5514 if (regno == -1)
5515 {
5516 for (regno = 0; regno < usr->num_regs; regno++)
5517 if (all || !linux_register_in_regsets (regs_info, regno))
5518 store_register (usr, regcache, regno);
5519 }
5520 else
5521 store_register (usr, regcache, regno);
5522 #endif
5523 }
5524
5525 void
5526 linux_process_target::fetch_registers (regcache *regcache, int regno)
5527 {
5528 int use_regsets;
5529 int all = 0;
5530 const regs_info *regs_info = get_regs_info ();
5531
5532 if (regno == -1)
5533 {
5534 if (regs_info->usrregs != NULL)
5535 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5536 low_fetch_register (regcache, regno);
5537
5538 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5539 if (regs_info->usrregs != NULL)
5540 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5541 }
5542 else
5543 {
5544 if (low_fetch_register (regcache, regno))
5545 return;
5546
5547 use_regsets = linux_register_in_regsets (regs_info, regno);
5548 if (use_regsets)
5549 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5550 regcache);
5551 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5552 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5553 }
5554 }
5555
5556 void
5557 linux_process_target::store_registers (regcache *regcache, int regno)
5558 {
5559 int use_regsets;
5560 int all = 0;
5561 const regs_info *regs_info = get_regs_info ();
5562
5563 if (regno == -1)
5564 {
5565 all = regsets_store_inferior_registers (regs_info->regsets_info,
5566 regcache);
5567 if (regs_info->usrregs != NULL)
5568 usr_store_inferior_registers (regs_info, regcache, regno, all);
5569 }
5570 else
5571 {
5572 use_regsets = linux_register_in_regsets (regs_info, regno);
5573 if (use_regsets)
5574 all = regsets_store_inferior_registers (regs_info->regsets_info,
5575 regcache);
5576 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5577 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5578 }
5579 }
5580
5581 bool
5582 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5583 {
5584 return false;
5585 }
5586
5587 /* A wrapper for the read_memory target op. */
5588
5589 static int
5590 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5591 {
5592 return the_target->read_memory (memaddr, myaddr, len);
5593 }
5594
5595 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5596 to debugger memory starting at MYADDR. */
5597
5598 int
5599 linux_process_target::read_memory (CORE_ADDR memaddr,
5600 unsigned char *myaddr, int len)
5601 {
5602 int pid = lwpid_of (current_thread);
5603 PTRACE_XFER_TYPE *buffer;
5604 CORE_ADDR addr;
5605 int count;
5606 char filename[64];
5607 int i;
5608 int ret;
5609 int fd;
5610
5611 /* Try using /proc. Don't bother for one word. */
5612 if (len >= 3 * sizeof (long))
5613 {
5614 int bytes;
5615
5616 /* We could keep this file open and cache it - possibly one per
5617 thread. That requires some juggling, but is even faster. */
5618 sprintf (filename, "/proc/%d/mem", pid);
5619 fd = open (filename, O_RDONLY | O_LARGEFILE);
5620 if (fd == -1)
5621 goto no_proc;
5622
5623 /* If pread64 is available, use it. It's faster if the kernel
5624 supports it (only one syscall), and it's 64-bit safe even on
5625 32-bit platforms (for instance, SPARC debugging a SPARC64
5626 application). */
5627 #ifdef HAVE_PREAD64
5628 bytes = pread64 (fd, myaddr, len, memaddr);
5629 #else
5630 bytes = -1;
5631 if (lseek (fd, memaddr, SEEK_SET) != -1)
5632 bytes = read (fd, myaddr, len);
5633 #endif
5634
5635 close (fd);
5636 if (bytes == len)
5637 return 0;
5638
5639 /* Some data was read; we'll try to get the rest with ptrace. */
5640 if (bytes > 0)
5641 {
5642 memaddr += bytes;
5643 myaddr += bytes;
5644 len -= bytes;
5645 }
5646 }
5647
5648 no_proc:
5649 /* Round starting address down to longword boundary. */
5650 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5651 /* Round ending address up; get number of longwords that makes. */
5652 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5653 / sizeof (PTRACE_XFER_TYPE));
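/* E.g., with memaddr == 0x1003, len == 13 and an 8-byte
   PTRACE_XFER_TYPE: addr == 0x1000 and count == (16 + 7) / 8 == 2,
   i.e. two longwords covering 0x1000..0x100f.  */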
5654 /* Allocate buffer of that many longwords. */
5655 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5656
5657 /* Read all the longwords */
5658 errno = 0;
5659 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5660 {
5661 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5662 about coercing an 8 byte integer to a 4 byte pointer. */
5663 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5664 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5665 (PTRACE_TYPE_ARG4) 0);
5666 if (errno)
5667 break;
5668 }
5669 ret = errno;
5670
5671 /* Copy appropriate bytes out of the buffer. */
5672 if (i > 0)
5673 {
5674 i *= sizeof (PTRACE_XFER_TYPE);
5675 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5676 memcpy (myaddr,
5677 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5678 i < len ? i : len);
5679 }
5680
5681 return ret;
5682 }
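
/* As an illustration of the fast path above, here is a minimal,
   standalone sketch of reading inferior memory via /proc/PID/mem.
   The helper name is hypothetical, error handling is reduced to the
   essentials, and the target LWP is assumed to be ptrace-stopped.  */
#if 0
static int
proc_mem_read_sketch (int pid, CORE_ADDR memaddr,
		      unsigned char *myaddr, int len)
{
  char filename[64];

  sprintf (filename, "/proc/%d/mem", pid);
  int fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return errno;

  /* pread seeks and reads in a single syscall, leaving the
     descriptor's file offset untouched.  */
  ssize_t bytes = pread (fd, myaddr, len, (off_t) memaddr);
  close (fd);
  return bytes == len ? 0 : EIO;
}
#endif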
5683
5684 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5685 memory at MEMADDR. On failure (cannot write to the inferior)
5686 returns the value of errno. Always succeeds if LEN is zero. */
5687
5688 int
5689 linux_process_target::write_memory (CORE_ADDR memaddr,
5690 const unsigned char *myaddr, int len)
5691 {
5692 int i;
5693 /* Round starting address down to longword boundary. */
5694 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5695 /* Round ending address up; get number of longwords that makes. */
5696 int count
5697 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5698 / sizeof (PTRACE_XFER_TYPE);
5699
5700 /* Allocate buffer of that many longwords. */
5701 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5702
5703 int pid = lwpid_of (current_thread);
5704
5705 if (len == 0)
5706 {
5707 /* Zero length write always succeeds. */
5708 return 0;
5709 }
5710
5711 if (debug_threads)
5712 {
5713 /* Dump up to four bytes. */
5714 char str[4 * 2 + 1];
5715 char *p = str;
5716 int dump = len < 4 ? len : 4;
5717
5718 for (i = 0; i < dump; i++)
5719 {
5720 sprintf (p, "%02x", myaddr[i]);
5721 p += 2;
5722 }
5723 *p = '\0';
5724
5725 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5726 str, (long) memaddr, pid);
5727 }
5728
5729 /* Fill start and end extra bytes of buffer with existing memory data. */
5730
5731 errno = 0;
5732 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5733 about coercing an 8 byte integer to a 4 byte pointer. */
5734 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5735 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5736 (PTRACE_TYPE_ARG4) 0);
5737 if (errno)
5738 return errno;
5739
5740 if (count > 1)
5741 {
5742 errno = 0;
5743 buffer[count - 1]
5744 = ptrace (PTRACE_PEEKTEXT, pid,
5745 /* Coerce to a uintptr_t first to avoid potential gcc warning
5746 about coercing an 8 byte integer to a 4 byte pointer. */
5747 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5748 * sizeof (PTRACE_XFER_TYPE)),
5749 (PTRACE_TYPE_ARG4) 0);
5750 if (errno)
5751 return errno;
5752 }
5753
5754 /* Copy data to be written over corresponding part of buffer. */
5755
5756 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5757 myaddr, len);
5758
5759 /* Write the entire buffer. */
5760
5761 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5762 {
5763 errno = 0;
5764 ptrace (PTRACE_POKETEXT, pid,
5765 /* Coerce to a uintptr_t first to avoid potential gcc warning
5766 about coercing an 8 byte integer to a 4 byte pointer. */
5767 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5768 (PTRACE_TYPE_ARG4) buffer[i]);
5769 if (errno)
5770 return errno;
5771 }
5772
5773 return 0;
5774 }
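
/* The loop above is a word-aligned read-modify-write cycle.  For
   illustration, the same pattern reduced to poking a single byte; the
   helper is hypothetical and assumes the LWP is ptrace-stopped.  */
#if 0
static int
poke_byte_sketch (int pid, CORE_ADDR memaddr, unsigned char val)
{
  /* Round down to the containing word, peek it, patch one byte,
     then poke the whole word back.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);

  errno = 0;
  PTRACE_XFER_TYPE word = ptrace (PTRACE_PEEKTEXT, pid,
				  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
				  (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  memcpy ((char *) &word + (memaddr - addr), &val, 1);

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid,
	  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	  (PTRACE_TYPE_ARG4) word);
  return errno;
}
#endif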
5775
5776 void
5777 linux_process_target::look_up_symbols ()
5778 {
5779 #ifdef USE_THREAD_DB
5780 struct process_info *proc = current_process ();
5781
5782 if (proc->priv->thread_db != NULL)
5783 return;
5784
5785 thread_db_init ();
5786 #endif
5787 }
5788
5789 void
5790 linux_process_target::request_interrupt ()
5791 {
5792 /* Send a SIGINT to the process group. This acts just like the user
5793 typed a ^C on the controlling terminal. */
5794 ::kill (-signal_pid, SIGINT);
5795 }
5796
5797 bool
5798 linux_process_target::supports_read_auxv ()
5799 {
5800 return true;
5801 }
5802
5803 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5804 to debugger memory starting at MYADDR. */
5805
5806 int
5807 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5808 unsigned int len)
5809 {
5810 char filename[PATH_MAX];
5811 int fd, n;
5812 int pid = lwpid_of (current_thread);
5813
5814 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5815
5816 fd = open (filename, O_RDONLY);
5817 if (fd < 0)
5818 return -1;
5819
5820 if (offset != (CORE_ADDR) 0
5821 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5822 n = -1;
5823 else
5824 n = read (fd, myaddr, len);
5825
5826 close (fd);
5827
5828 return n;
5829 }
5830
5831 int
5832 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5833 int size, raw_breakpoint *bp)
5834 {
5835 if (type == raw_bkpt_type_sw)
5836 return insert_memory_breakpoint (bp);
5837 else
5838 return low_insert_point (type, addr, size, bp);
5839 }
5840
5841 int
5842 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5843 int size, raw_breakpoint *bp)
5844 {
5845 /* Unsupported (see target.h). */
5846 return 1;
5847 }
5848
5849 int
5850 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5851 int size, raw_breakpoint *bp)
5852 {
5853 if (type == raw_bkpt_type_sw)
5854 return remove_memory_breakpoint (bp);
5855 else
5856 return low_remove_point (type, addr, size, bp);
5857 }
5858
5859 int
5860 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5861 int size, raw_breakpoint *bp)
5862 {
5863 /* Unsupported (see target.h). */
5864 return 1;
5865 }
5866
5867 /* Implement the stopped_by_sw_breakpoint target_ops
5868 method. */
5869
5870 bool
5871 linux_process_target::stopped_by_sw_breakpoint ()
5872 {
5873 struct lwp_info *lwp = get_thread_lwp (current_thread);
5874
5875 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5876 }
5877
5878 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5879 method. */
5880
5881 bool
5882 linux_process_target::supports_stopped_by_sw_breakpoint ()
5883 {
5884 return USE_SIGTRAP_SIGINFO;
5885 }
5886
5887 /* Implement the stopped_by_hw_breakpoint target_ops
5888 method. */
5889
5890 bool
5891 linux_process_target::stopped_by_hw_breakpoint ()
5892 {
5893 struct lwp_info *lwp = get_thread_lwp (current_thread);
5894
5895 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5896 }
5897
5898 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5899 method. */
5900
5901 bool
5902 linux_process_target::supports_stopped_by_hw_breakpoint ()
5903 {
5904 return USE_SIGTRAP_SIGINFO;
5905 }
5906
5907 /* Implement the supports_hardware_single_step target_ops method. */
5908
5909 bool
5910 linux_process_target::supports_hardware_single_step ()
5911 {
5912 return can_hardware_single_step ();
5913 }
5914
5915 bool
5916 linux_process_target::stopped_by_watchpoint ()
5917 {
5918 struct lwp_info *lwp = get_thread_lwp (current_thread);
5919
5920 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5921 }
5922
5923 CORE_ADDR
5924 linux_process_target::stopped_data_address ()
5925 {
5926 struct lwp_info *lwp = get_thread_lwp (current_thread);
5927
5928 return lwp->stopped_data_address;
5929 }
5930
5931 /* This is only used for targets that define PT_TEXT_ADDR,
5932    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined,
5933    the target presumably has different ways of acquiring this
5934    information, such as loadmaps.  */
5935
5936 bool
5937 linux_process_target::supports_read_offsets ()
5938 {
5939 #ifdef SUPPORTS_READ_OFFSETS
5940 return true;
5941 #else
5942 return false;
5943 #endif
5944 }
5945
5946 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5947 to tell gdb about. */
5948
5949 int
5950 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5951 {
5952 #ifdef SUPPORTS_READ_OFFSETS
5953 unsigned long text, text_end, data;
5954 int pid = lwpid_of (current_thread);
5955
5956 errno = 0;
5957
5958 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5959 (PTRACE_TYPE_ARG4) 0);
5960 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5961 (PTRACE_TYPE_ARG4) 0);
5962 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5963 (PTRACE_TYPE_ARG4) 0);
5964
5965 if (errno == 0)
5966 {
5967 /* Both text and data offsets produced at compile-time (and so
5968 used by gdb) are relative to the beginning of the program,
5969 with the data segment immediately following the text segment.
5970 However, the actual runtime layout in memory may put the data
5971 somewhere else, so when we send gdb a data base-address, we
5972 use the real data base address and subtract the compile-time
5973 data base-address from it (which is just the length of the
5974 text segment). BSS immediately follows data in both
5975 cases. */
5976 *text_p = text;
5977 *data_p = data - (text_end - text);
5978
5979 return 1;
5980 }
5981 return 0;
5982 #else
5983 gdb_assert_not_reached ("target op read_offsets not supported");
5984 #endif
5985 }
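
/* Worked example with illustrative addresses: if text was loaded at
   0x10000 with text_end at 0x18000 (a 0x8000-byte text segment) and
   data sits at 0x40000, the data base reported to GDB is
   0x40000 - 0x8000 = 0x38000, because GDB adds back the compile-time
   data offset, which equals the text length.  */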
5986
5987 bool
5988 linux_process_target::supports_get_tls_address ()
5989 {
5990 #ifdef USE_THREAD_DB
5991 return true;
5992 #else
5993 return false;
5994 #endif
5995 }
5996
5997 int
5998 linux_process_target::get_tls_address (thread_info *thread,
5999 CORE_ADDR offset,
6000 CORE_ADDR load_module,
6001 CORE_ADDR *address)
6002 {
6003 #ifdef USE_THREAD_DB
6004 return thread_db_get_tls_address (thread, offset, load_module, address);
6005 #else
6006 return -1;
6007 #endif
6008 }
6009
6010 bool
6011 linux_process_target::supports_qxfer_osdata ()
6012 {
6013 return true;
6014 }
6015
6016 int
6017 linux_process_target::qxfer_osdata (const char *annex,
6018 unsigned char *readbuf,
6019 unsigned const char *writebuf,
6020 CORE_ADDR offset, int len)
6021 {
6022 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6023 }
6024
6025 void
6026 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
6027 gdb_byte *inf_siginfo, int direction)
6028 {
6029 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
6030
6031 /* If there was no callback, or the callback didn't do anything,
6032 then just do a straight memcpy. */
6033 if (!done)
6034 {
6035 if (direction == 1)
6036 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6037 else
6038 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6039 }
6040 }
6041
6042 bool
6043 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
6044 int direction)
6045 {
6046 return false;
6047 }
6048
6049 bool
6050 linux_process_target::supports_qxfer_siginfo ()
6051 {
6052 return true;
6053 }
6054
6055 int
6056 linux_process_target::qxfer_siginfo (const char *annex,
6057 unsigned char *readbuf,
6058 unsigned const char *writebuf,
6059 CORE_ADDR offset, int len)
6060 {
6061 int pid;
6062 siginfo_t siginfo;
6063 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6064
6065 if (current_thread == NULL)
6066 return -1;
6067
6068 pid = lwpid_of (current_thread);
6069
6070 if (debug_threads)
6071 debug_printf ("%s siginfo for lwp %d.\n",
6072 readbuf != NULL ? "Reading" : "Writing",
6073 pid);
6074
6075 if (offset >= sizeof (siginfo))
6076 return -1;
6077
6078 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6079 return -1;
6080
6081 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6082 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6083 inferior with a 64-bit GDBSERVER should look the same as debugging it
6084 with a 32-bit GDBSERVER, we need to convert it. */
6085 siginfo_fixup (&siginfo, inf_siginfo, 0);
6086
6087 if (offset + len > sizeof (siginfo))
6088 len = sizeof (siginfo) - offset;
6089
6090 if (readbuf != NULL)
6091 memcpy (readbuf, inf_siginfo + offset, len);
6092 else
6093 {
6094 memcpy (inf_siginfo + offset, writebuf, len);
6095
6096 /* Convert back to ptrace layout before flushing it out. */
6097 siginfo_fixup (&siginfo, inf_siginfo, 1);
6098
6099 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6100 return -1;
6101 }
6102
6103 return len;
6104 }
6105
6106 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6107    it lets us notice when children change state; and it acts as the
6108    handler for the sigsuspend in my_waitpid.  */
6109
6110 static void
6111 sigchld_handler (int signo)
6112 {
6113 int old_errno = errno;
6114
6115 if (debug_threads)
6116 {
6117 do
6118 {
6119 /* Use the async signal safe debug function. */
6120 if (debug_write ("sigchld_handler\n",
6121 sizeof ("sigchld_handler\n") - 1) < 0)
6122 break; /* just ignore */
6123 } while (0);
6124 }
6125
6126 if (target_is_async_p ())
6127 async_file_mark (); /* trigger a linux_wait */
6128
6129 errno = old_errno;
6130 }
6131
6132 bool
6133 linux_process_target::supports_non_stop ()
6134 {
6135 return true;
6136 }
6137
6138 bool
6139 linux_process_target::async (bool enable)
6140 {
6141 bool previous = target_is_async_p ();
6142
6143 if (debug_threads)
6144 debug_printf ("linux_async (%d), previous=%d\n",
6145 enable, previous);
6146
6147 if (previous != enable)
6148 {
6149 sigset_t mask;
6150 sigemptyset (&mask);
6151 sigaddset (&mask, SIGCHLD);
6152
6153 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6154
6155 if (enable)
6156 {
6157 if (pipe (linux_event_pipe) == -1)
6158 {
6159 linux_event_pipe[0] = -1;
6160 linux_event_pipe[1] = -1;
6161 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6162
6163 warning ("creating event pipe failed.");
6164 return previous;
6165 }
6166
6167 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6168 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6169
6170 /* Register the event loop handler. */
6171 add_file_handler (linux_event_pipe[0],
6172 handle_target_event, NULL);
6173
6174 /* Always trigger a linux_wait. */
6175 async_file_mark ();
6176 }
6177 else
6178 {
6179 delete_file_handler (linux_event_pipe[0]);
6180
6181 close (linux_event_pipe[0]);
6182 close (linux_event_pipe[1]);
6183 linux_event_pipe[0] = -1;
6184 linux_event_pipe[1] = -1;
6185 }
6186
6187 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6188 }
6189
6190 return previous;
6191 }
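
/* The machinery above is the classic self-pipe trick: the SIGCHLD
   handler marks the pipe and the event loop watches its read end.
   A minimal sketch, with hypothetical names and none of the
   gdbserver specifics:  */
#if 0
static int event_pipe_sketch[2];

/* Async-signal-safe; callable from a signal handler.  */
static void
mark_event_sketch (void)
{
  (void) write (event_pipe_sketch[1], "+", 1);
}

static int
setup_event_pipe_sketch (void)
{
  if (pipe (event_pipe_sketch) == -1)
    return -1;
  /* Non-blocking on both ends so neither the handler nor the
     drain loop can stall.  */
  fcntl (event_pipe_sketch[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe_sketch[1], F_SETFL, O_NONBLOCK);
  return event_pipe_sketch[0];	/* select/poll on this end.  */
}
#endif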
6192
6193 int
6194 linux_process_target::start_non_stop (bool nonstop)
6195 {
6196 /* Register or unregister from event-loop accordingly. */
6197 target_async (nonstop);
6198
6199 if (target_is_async_p () != (nonstop != false))
6200 return -1;
6201
6202 return 0;
6203 }
6204
6205 bool
6206 linux_process_target::supports_multi_process ()
6207 {
6208 return true;
6209 }
6210
6211 /* Check if fork events are supported. */
6212
6213 bool
6214 linux_process_target::supports_fork_events ()
6215 {
6216 return linux_supports_tracefork ();
6217 }
6218
6219 /* Check if vfork events are supported. */
6220
6221 bool
6222 linux_process_target::supports_vfork_events ()
6223 {
6224 return linux_supports_tracefork ();
6225 }
6226
6227 /* Check if exec events are supported. */
6228
6229 bool
6230 linux_process_target::supports_exec_events ()
6231 {
6232 return linux_supports_traceexec ();
6233 }
6234
6235 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6236 ptrace flags for all inferiors. This is in case the new GDB connection
6237 doesn't support the same set of events that the previous one did. */
6238
6239 void
6240 linux_process_target::handle_new_gdb_connection ()
6241 {
6242 /* Request that all the lwps reset their ptrace options. */
6243 for_each_thread ([] (thread_info *thread)
6244 {
6245 struct lwp_info *lwp = get_thread_lwp (thread);
6246
6247 if (!lwp->stopped)
6248 {
6249 /* Stop the lwp so we can modify its ptrace options. */
6250 lwp->must_set_ptrace_flags = 1;
6251 linux_stop_lwp (lwp);
6252 }
6253 else
6254 {
6255 /* Already stopped; go ahead and set the ptrace options. */
6256 struct process_info *proc = find_process_pid (pid_of (thread));
6257 int options = linux_low_ptrace_options (proc->attached);
6258
6259 linux_enable_event_reporting (lwpid_of (thread), options);
6260 lwp->must_set_ptrace_flags = 0;
6261 }
6262 });
6263 }
6264
6265 int
6266 linux_process_target::handle_monitor_command (char *mon)
6267 {
6268 #ifdef USE_THREAD_DB
6269 return thread_db_handle_monitor_command (mon);
6270 #else
6271 return 0;
6272 #endif
6273 }
6274
6275 int
6276 linux_process_target::core_of_thread (ptid_t ptid)
6277 {
6278 return linux_common_core_of_thread (ptid);
6279 }
6280
6281 bool
6282 linux_process_target::supports_disable_randomization ()
6283 {
6284 #ifdef HAVE_PERSONALITY
6285 return true;
6286 #else
6287 return false;
6288 #endif
6289 }
6290
6291 bool
6292 linux_process_target::supports_agent ()
6293 {
6294 return true;
6295 }
6296
6297 bool
6298 linux_process_target::supports_range_stepping ()
6299 {
6300 if (supports_software_single_step ())
6301 return true;
6302 if (*the_low_target.supports_range_stepping == NULL)
6303 return false;
6304
6305 return (*the_low_target.supports_range_stepping) ();
6306 }
6307
6308 bool
6309 linux_process_target::supports_pid_to_exec_file ()
6310 {
6311 return true;
6312 }
6313
6314 char *
6315 linux_process_target::pid_to_exec_file (int pid)
6316 {
6317 return linux_proc_pid_to_exec_file (pid);
6318 }
6319
6320 bool
6321 linux_process_target::supports_multifs ()
6322 {
6323 return true;
6324 }
6325
6326 int
6327 linux_process_target::multifs_open (int pid, const char *filename,
6328 int flags, mode_t mode)
6329 {
6330 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6331 }
6332
6333 int
6334 linux_process_target::multifs_unlink (int pid, const char *filename)
6335 {
6336 return linux_mntns_unlink (pid, filename);
6337 }
6338
6339 ssize_t
6340 linux_process_target::multifs_readlink (int pid, const char *filename,
6341 char *buf, size_t bufsiz)
6342 {
6343 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6344 }
6345
6346 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6347 struct target_loadseg
6348 {
6349 /* Core address to which the segment is mapped. */
6350 Elf32_Addr addr;
6351 /* VMA recorded in the program header. */
6352 Elf32_Addr p_vaddr;
6353 /* Size of this segment in memory. */
6354 Elf32_Word p_memsz;
6355 };
6356
6357 # if defined PT_GETDSBT
6358 struct target_loadmap
6359 {
6360 /* Protocol version number, must be zero. */
6361 Elf32_Word version;
6362 /* Pointer to the DSBT table, its size, and the DSBT index. */
6363 unsigned *dsbt_table;
6364 unsigned dsbt_size, dsbt_index;
6365 /* Number of segments in this map. */
6366 Elf32_Word nsegs;
6367 /* The actual memory map. */
6368 struct target_loadseg segs[/*nsegs*/];
6369 };
6370 # define LINUX_LOADMAP PT_GETDSBT
6371 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6372 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6373 # else
6374 struct target_loadmap
6375 {
6376 /* Protocol version number, must be zero. */
6377 Elf32_Half version;
6378 /* Number of segments in this map. */
6379 Elf32_Half nsegs;
6380 /* The actual memory map. */
6381 struct target_loadseg segs[/*nsegs*/];
6382 };
6383 # define LINUX_LOADMAP PTRACE_GETFDPIC
6384 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6385 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6386 # endif
6387
6388 bool
6389 linux_process_target::supports_read_loadmap ()
6390 {
6391 return true;
6392 }
6393
6394 int
6395 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6396 unsigned char *myaddr, unsigned int len)
6397 {
6398 int pid = lwpid_of (current_thread);
6399 int addr = -1;
6400 struct target_loadmap *data = NULL;
6401 unsigned int actual_length, copy_length;
6402
6403 if (strcmp (annex, "exec") == 0)
6404 addr = (int) LINUX_LOADMAP_EXEC;
6405 else if (strcmp (annex, "interp") == 0)
6406 addr = (int) LINUX_LOADMAP_INTERP;
6407 else
6408 return -1;
6409
6410 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6411 return -1;
6412
6413 if (data == NULL)
6414 return -1;
6415
6416 actual_length = sizeof (struct target_loadmap)
6417 + sizeof (struct target_loadseg) * data->nsegs;
6418
6419 if (offset < 0 || offset > actual_length)
6420 return -1;
6421
6422 copy_length = actual_length - offset < len ? actual_length - offset : len;
6423 memcpy (myaddr, (char *) data + offset, copy_length);
6424 return copy_length;
6425 }
6426 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6427
6428 void
6429 linux_process_target::process_qsupported (char **features, int count)
6430 {
6431 if (the_low_target.process_qsupported != NULL)
6432 the_low_target.process_qsupported (features, count);
6433 }
6434
6435 bool
6436 linux_process_target::supports_catch_syscall ()
6437 {
6438 return (the_low_target.get_syscall_trapinfo != NULL
6439 && linux_supports_tracesysgood ());
6440 }
6441
6442 int
6443 linux_process_target::get_ipa_tdesc_idx ()
6444 {
6445 if (the_low_target.get_ipa_tdesc_idx == NULL)
6446 return 0;
6447
6448 return (*the_low_target.get_ipa_tdesc_idx) ();
6449 }
6450
6451 bool
6452 linux_process_target::supports_tracepoints ()
6453 {
6454 if (*the_low_target.supports_tracepoints == NULL)
6455 return false;
6456
6457 return (*the_low_target.supports_tracepoints) ();
6458 }
6459
6460 CORE_ADDR
6461 linux_process_target::read_pc (regcache *regcache)
6462 {
6463 if (!low_supports_breakpoints ())
6464 return 0;
6465
6466 return low_get_pc (regcache);
6467 }
6468
6469 void
6470 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6471 {
6472 gdb_assert (low_supports_breakpoints ());
6473
6474 low_set_pc (regcache, pc);
6475 }
6476
6477 bool
6478 linux_process_target::supports_thread_stopped ()
6479 {
6480 return true;
6481 }
6482
6483 bool
6484 linux_process_target::thread_stopped (thread_info *thread)
6485 {
6486 return get_thread_lwp (thread)->stopped;
6487 }
6488
6489 /* This exposes stop-all-threads functionality to other modules. */
6490
6491 void
6492 linux_process_target::pause_all (bool freeze)
6493 {
6494 stop_all_lwps (freeze, NULL);
6495 }
6496
6497 /* This exposes unstop-all-threads functionality to other gdbserver
6498 modules. */
6499
6500 void
6501 linux_process_target::unpause_all (bool unfreeze)
6502 {
6503 unstop_all_lwps (unfreeze, NULL);
6504 }
6505
6506 int
6507 linux_process_target::prepare_to_access_memory ()
6508 {
6509 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6510 running LWP. */
6511 if (non_stop)
6512 target_pause_all (true);
6513 return 0;
6514 }
6515
6516 void
6517 linux_process_target::done_accessing_memory ()
6518 {
6519 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6520 running LWP. */
6521 if (non_stop)
6522 target_unpause_all (true);
6523 }
6524
6525 bool
6526 linux_process_target::supports_fast_tracepoints ()
6527 {
6528 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6529 }
6530
6531 int
6532 linux_process_target::install_fast_tracepoint_jump_pad
6533 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6534 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6535 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6536 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6537 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6538 char *err)
6539 {
6540 return (*the_low_target.install_fast_tracepoint_jump_pad)
6541 (tpoint, tpaddr, collector, lockaddr, orig_size,
6542 jump_entry, trampoline, trampoline_size,
6543 jjump_pad_insn, jjump_pad_insn_size,
6544 adjusted_insn_addr, adjusted_insn_addr_end,
6545 err);
6546 }
6547
6548 emit_ops *
6549 linux_process_target::emit_ops ()
6550 {
6551 if (the_low_target.emit_ops != NULL)
6552 return (*the_low_target.emit_ops) ();
6553 else
6554 return NULL;
6555 }
6556
6557 int
6558 linux_process_target::get_min_fast_tracepoint_insn_len ()
6559 {
6560 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6561 }
6562
6563 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6564
6565 static int
6566 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6567 CORE_ADDR *phdr_memaddr, int *num_phdr)
6568 {
6569 char filename[PATH_MAX];
6570 int fd;
6571 const int auxv_size = is_elf64
6572 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6573 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6574
6575 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6576
6577 fd = open (filename, O_RDONLY);
6578 if (fd < 0)
6579 return 1;
6580
6581 *phdr_memaddr = 0;
6582 *num_phdr = 0;
6583 while (read (fd, buf, auxv_size) == auxv_size
6584 && (*phdr_memaddr == 0 || *num_phdr == 0))
6585 {
6586 if (is_elf64)
6587 {
6588 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6589
6590 switch (aux->a_type)
6591 {
6592 case AT_PHDR:
6593 *phdr_memaddr = aux->a_un.a_val;
6594 break;
6595 case AT_PHNUM:
6596 *num_phdr = aux->a_un.a_val;
6597 break;
6598 }
6599 }
6600 else
6601 {
6602 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6603
6604 switch (aux->a_type)
6605 {
6606 case AT_PHDR:
6607 *phdr_memaddr = aux->a_un.a_val;
6608 break;
6609 case AT_PHNUM:
6610 *num_phdr = aux->a_un.a_val;
6611 break;
6612 }
6613 }
6614 }
6615
6616 close (fd);
6617
6618 if (*phdr_memaddr == 0 || *num_phdr == 0)
6619 {
6620 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6621 "phdr_memaddr = %ld, phdr_num = %d",
6622 (long) *phdr_memaddr, *num_phdr);
6623 return 2;
6624 }
6625
6626 return 0;
6627 }
6628
6629 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6630
6631 static CORE_ADDR
6632 get_dynamic (const int pid, const int is_elf64)
6633 {
6634 CORE_ADDR phdr_memaddr, relocation;
6635 int num_phdr, i;
6636 unsigned char *phdr_buf;
6637 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6638
6639 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6640 return 0;
6641
6642 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6643 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6644
6645 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6646 return 0;
6647
6648 /* Compute relocation: it is expected to be 0 for "regular" executables,
6649 non-zero for PIE ones. */
6650 relocation = -1;
6651 for (i = 0; relocation == -1 && i < num_phdr; i++)
6652 if (is_elf64)
6653 {
6654 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6655
6656 if (p->p_type == PT_PHDR)
6657 relocation = phdr_memaddr - p->p_vaddr;
6658 }
6659 else
6660 {
6661 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6662
6663 if (p->p_type == PT_PHDR)
6664 relocation = phdr_memaddr - p->p_vaddr;
6665 }
6666
6667 if (relocation == -1)
6668 {
6669       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6670 	 all real-world executables, including PIE executables, always have
6671 	 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6672 	 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6673 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
6674
6675 	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6676
6677 	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6678
6679 return 0;
6680 }
6681
6682 for (i = 0; i < num_phdr; i++)
6683 {
6684 if (is_elf64)
6685 {
6686 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6687
6688 if (p->p_type == PT_DYNAMIC)
6689 return p->p_vaddr + relocation;
6690 }
6691 else
6692 {
6693 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6694
6695 if (p->p_type == PT_DYNAMIC)
6696 return p->p_vaddr + relocation;
6697 }
6698 }
6699
6700 return 0;
6701 }
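
/* Worked example with illustrative addresses: for a PIE whose program
   headers are mapped at phdr_memaddr == 0x555555554040 and whose
   PT_PHDR records p_vaddr == 0x40, the relocation is
   0x555555554040 - 0x40 = 0x555555554000; a PT_DYNAMIC entry with
   p_vaddr == 0x2d80 then yields &_DYNAMIC == 0x555555556d80.  For a
   non-PIE executable the relocation works out to 0.  */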
6702
6703 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6704 can be 0 if the inferior does not yet have the library list initialized.
6705 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6706 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6707
6708 static CORE_ADDR
6709 get_r_debug (const int pid, const int is_elf64)
6710 {
6711 CORE_ADDR dynamic_memaddr;
6712 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6713 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6714 CORE_ADDR map = -1;
6715
6716 dynamic_memaddr = get_dynamic (pid, is_elf64);
6717 if (dynamic_memaddr == 0)
6718 return map;
6719
6720 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6721 {
6722 if (is_elf64)
6723 {
6724 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6725 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6726 union
6727 {
6728 Elf64_Xword map;
6729 unsigned char buf[sizeof (Elf64_Xword)];
6730 }
6731 rld_map;
6732 #endif
6733 #ifdef DT_MIPS_RLD_MAP
6734 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6735 {
6736 if (linux_read_memory (dyn->d_un.d_val,
6737 rld_map.buf, sizeof (rld_map.buf)) == 0)
6738 return rld_map.map;
6739 else
6740 break;
6741 }
6742 #endif /* DT_MIPS_RLD_MAP */
6743 #ifdef DT_MIPS_RLD_MAP_REL
6744 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6745 {
6746 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6747 rld_map.buf, sizeof (rld_map.buf)) == 0)
6748 return rld_map.map;
6749 else
6750 break;
6751 }
6752 #endif /* DT_MIPS_RLD_MAP_REL */
6753
6754 if (dyn->d_tag == DT_DEBUG && map == -1)
6755 map = dyn->d_un.d_val;
6756
6757 if (dyn->d_tag == DT_NULL)
6758 break;
6759 }
6760 else
6761 {
6762 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6763 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6764 union
6765 {
6766 Elf32_Word map;
6767 unsigned char buf[sizeof (Elf32_Word)];
6768 }
6769 rld_map;
6770 #endif
6771 #ifdef DT_MIPS_RLD_MAP
6772 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6773 {
6774 if (linux_read_memory (dyn->d_un.d_val,
6775 rld_map.buf, sizeof (rld_map.buf)) == 0)
6776 return rld_map.map;
6777 else
6778 break;
6779 }
6780 #endif /* DT_MIPS_RLD_MAP */
6781 #ifdef DT_MIPS_RLD_MAP_REL
6782 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6783 {
6784 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6785 rld_map.buf, sizeof (rld_map.buf)) == 0)
6786 return rld_map.map;
6787 else
6788 break;
6789 }
6790 #endif /* DT_MIPS_RLD_MAP_REL */
6791
6792 if (dyn->d_tag == DT_DEBUG && map == -1)
6793 map = dyn->d_un.d_val;
6794
6795 if (dyn->d_tag == DT_NULL)
6796 break;
6797 }
6798
6799 dynamic_memaddr += dyn_size;
6800 }
6801
6802 return map;
6803 }
6804
6805 /* Read one pointer from MEMADDR in the inferior. */
6806
6807 static int
6808 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6809 {
6810 int ret;
6811
6812   /* Go through a union so this works on either big- or little-endian
6813      hosts, when the inferior's pointer size is smaller than the size
6814      of CORE_ADDR.  It is assumed that the inferior's endianness is the
6815      same as the superior's.  */
6816 union
6817 {
6818 CORE_ADDR core_addr;
6819 unsigned int ui;
6820 unsigned char uc;
6821 } addr;
6822
6823 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6824 if (ret == 0)
6825 {
6826 if (ptr_size == sizeof (CORE_ADDR))
6827 *ptr = addr.core_addr;
6828 else if (ptr_size == sizeof (unsigned int))
6829 *ptr = addr.ui;
6830 else
6831 gdb_assert_not_reached ("unhandled pointer size");
6832 }
6833 return ret;
6834 }
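
/* For example, with a 32-bit inferior under a 64-bit gdbserver,
   ptr_size is 4: the four bytes land at the start of the union, so
   addr.ui yields the right value on both big- and little-endian
   hosts, whereas reading through the 8-byte addr.core_addr directly
   would misplace the bytes on one of them.  */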
6835
6836 bool
6837 linux_process_target::supports_qxfer_libraries_svr4 ()
6838 {
6839 return true;
6840 }
6841
6842 struct link_map_offsets
6843 {
6844 /* Offset and size of r_debug.r_version. */
6845 int r_version_offset;
6846
6847 /* Offset and size of r_debug.r_map. */
6848 int r_map_offset;
6849
6850 /* Offset to l_addr field in struct link_map. */
6851 int l_addr_offset;
6852
6853 /* Offset to l_name field in struct link_map. */
6854 int l_name_offset;
6855
6856 /* Offset to l_ld field in struct link_map. */
6857 int l_ld_offset;
6858
6859 /* Offset to l_next field in struct link_map. */
6860 int l_next_offset;
6861
6862 /* Offset to l_prev field in struct link_map. */
6863 int l_prev_offset;
6864 };
6865
6866 /* Construct qXfer:libraries-svr4:read reply. */
6867
6868 int
6869 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6870 unsigned char *readbuf,
6871 unsigned const char *writebuf,
6872 CORE_ADDR offset, int len)
6873 {
6874 struct process_info_private *const priv = current_process ()->priv;
6875 char filename[PATH_MAX];
6876 int pid, is_elf64;
6877
6878 static const struct link_map_offsets lmo_32bit_offsets =
6879 {
6880 0, /* r_version offset. */
6881 4, /* r_debug.r_map offset. */
6882 0, /* l_addr offset in link_map. */
6883 4, /* l_name offset in link_map. */
6884 8, /* l_ld offset in link_map. */
6885 12, /* l_next offset in link_map. */
6886 16 /* l_prev offset in link_map. */
6887 };
6888
6889 static const struct link_map_offsets lmo_64bit_offsets =
6890 {
6891 0, /* r_version offset. */
6892 8, /* r_debug.r_map offset. */
6893 0, /* l_addr offset in link_map. */
6894 8, /* l_name offset in link_map. */
6895 16, /* l_ld offset in link_map. */
6896 24, /* l_next offset in link_map. */
6897 32 /* l_prev offset in link_map. */
6898 };
6899 const struct link_map_offsets *lmo;
6900 unsigned int machine;
6901 int ptr_size;
6902 CORE_ADDR lm_addr = 0, lm_prev = 0;
6903 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6904 int header_done = 0;
6905
6906 if (writebuf != NULL)
6907 return -2;
6908 if (readbuf == NULL)
6909 return -1;
6910
6911 pid = lwpid_of (current_thread);
6912 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6913 is_elf64 = elf_64_file_p (filename, &machine);
6914 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6915 ptr_size = is_elf64 ? 8 : 4;
6916
6917 while (annex[0] != '\0')
6918 {
6919 const char *sep;
6920 CORE_ADDR *addrp;
6921 int name_len;
6922
6923 sep = strchr (annex, '=');
6924 if (sep == NULL)
6925 break;
6926
6927 name_len = sep - annex;
6928 if (name_len == 5 && startswith (annex, "start"))
6929 addrp = &lm_addr;
6930 else if (name_len == 4 && startswith (annex, "prev"))
6931 addrp = &lm_prev;
6932 else
6933 {
6934 annex = strchr (sep, ';');
6935 if (annex == NULL)
6936 break;
6937 annex++;
6938 continue;
6939 }
6940
6941 annex = decode_address_to_semicolon (addrp, sep + 1);
6942 }
6943
6944 if (lm_addr == 0)
6945 {
6946 int r_version = 0;
6947
6948 if (priv->r_debug == 0)
6949 priv->r_debug = get_r_debug (pid, is_elf64);
6950
6951       /* We failed to find DT_DEBUG.  This situation will not change
6952 	 for this inferior, so do not retry.  Report it to GDB as E01;
6953 	 see GDB's solib-svr4.c for the reasons.  */
6954 if (priv->r_debug == (CORE_ADDR) -1)
6955 return -1;
6956
6957 if (priv->r_debug != 0)
6958 {
6959 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6960 (unsigned char *) &r_version,
6961 sizeof (r_version)) != 0
6962 || r_version != 1)
6963 {
6964 warning ("unexpected r_debug version %d", r_version);
6965 }
6966 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6967 &lm_addr, ptr_size) != 0)
6968 {
6969 warning ("unable to read r_map from 0x%lx",
6970 (long) priv->r_debug + lmo->r_map_offset);
6971 }
6972 }
6973 }
6974
6975 std::string document = "<library-list-svr4 version=\"1.0\"";
6976
6977 while (lm_addr
6978 && read_one_ptr (lm_addr + lmo->l_name_offset,
6979 &l_name, ptr_size) == 0
6980 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6981 &l_addr, ptr_size) == 0
6982 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6983 &l_ld, ptr_size) == 0
6984 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6985 &l_prev, ptr_size) == 0
6986 && read_one_ptr (lm_addr + lmo->l_next_offset,
6987 &l_next, ptr_size) == 0)
6988 {
6989 unsigned char libname[PATH_MAX];
6990
6991 if (lm_prev != l_prev)
6992 {
6993 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6994 (long) lm_prev, (long) l_prev);
6995 break;
6996 }
6997
6998       /* Ignore the first entry even if it has a valid name, as the first
6999 	 entry corresponds to the main executable.  The first entry should not
7000 	 be skipped if the dynamic loader was loaded late by a static executable
7001 	 (see the solib-svr4.c parameter ignore_first).  But in such a case the
7002 	 main executable does not have PT_DYNAMIC present, and this function has
7003 	 already exited above due to a failed get_r_debug.  */
7004 if (lm_prev == 0)
7005 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7006 else
7007 {
7008 /* Not checking for error because reading may stop before
7009 we've got PATH_MAX worth of characters. */
7010 libname[0] = '\0';
7011 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7012 libname[sizeof (libname) - 1] = '\0';
7013 if (libname[0] != '\0')
7014 {
7015 if (!header_done)
7016 {
7017 /* Terminate `<library-list-svr4'. */
7018 document += '>';
7019 header_done = 1;
7020 }
7021
7022 string_appendf (document, "<library name=\"");
7023 xml_escape_text_append (&document, (char *) libname);
7024 string_appendf (document, "\" lm=\"0x%lx\" "
7025 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7026 (unsigned long) lm_addr, (unsigned long) l_addr,
7027 (unsigned long) l_ld);
7028 }
7029 }
7030
7031 lm_prev = lm_addr;
7032 lm_addr = l_next;
7033 }
7034
7035 if (!header_done)
7036 {
7037 /* Empty list; terminate `<library-list-svr4'. */
7038 document += "/>";
7039 }
7040 else
7041 document += "</library-list-svr4>";
7042
7043 int document_len = document.length ();
7044 if (offset < document_len)
7045 document_len -= offset;
7046 else
7047 document_len = 0;
7048 if (len > document_len)
7049 len = document_len;
7050
7051 memcpy (readbuf, document.data () + offset, len);
7052
7053 return len;
7054 }
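
/* For example, an empty annex (or "start=0;prev=0;") walks the whole
   list starting from r_debug.r_map, and a reply might look like the
   following (addresses hypothetical, wrapped here for readability):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffd9d0"
	 l_addr="0x7ffff7a00000" l_ld="0x7ffff7dccb80"/>
     </library-list-svr4>  */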
7055
7056 #ifdef HAVE_LINUX_BTRACE
7057
7058 btrace_target_info *
7059 linux_process_target::enable_btrace (ptid_t ptid,
7060 const btrace_config *conf)
7061 {
7062 return linux_enable_btrace (ptid, conf);
7063 }
7064
7065 /* See to_disable_btrace target method. */
7066
7067 int
7068 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7069 {
7070 enum btrace_error err;
7071
7072 err = linux_disable_btrace (tinfo);
7073 return (err == BTRACE_ERR_NONE ? 0 : -1);
7074 }
7075
7076 /* Encode an Intel Processor Trace configuration. */
7077
7078 static void
7079 linux_low_encode_pt_config (struct buffer *buffer,
7080 const struct btrace_data_pt_config *config)
7081 {
7082 buffer_grow_str (buffer, "<pt-config>\n");
7083
7084 switch (config->cpu.vendor)
7085 {
7086 case CV_INTEL:
7087 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7088 "model=\"%u\" stepping=\"%u\"/>\n",
7089 config->cpu.family, config->cpu.model,
7090 config->cpu.stepping);
7091 break;
7092
7093 default:
7094 break;
7095 }
7096
7097 buffer_grow_str (buffer, "</pt-config>\n");
7098 }
7099
7100 /* Encode a raw buffer. */
7101
7102 static void
7103 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7104 unsigned int size)
7105 {
7106 if (size == 0)
7107 return;
7108
7109 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7110 buffer_grow_str (buffer, "<raw>\n");
7111
7112 while (size-- > 0)
7113 {
7114 char elem[2];
7115
7116 elem[0] = tohex ((*data >> 4) & 0xf);
7117 elem[1] = tohex (*data++ & 0xf);
7118
7119 buffer_grow (buffer, elem, 2);
7120 }
7121
7122 buffer_grow_str (buffer, "</raw>\n");
7123 }
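
/* E.g. the two bytes { 0xde, 0xad } are emitted as the four
   characters "dead" between <raw> and </raw>.  */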
7124
7125 /* See to_read_btrace target method. */
7126
7127 int
7128 linux_process_target::read_btrace (btrace_target_info *tinfo,
7129 buffer *buffer,
7130 enum btrace_read_type type)
7131 {
7132 struct btrace_data btrace;
7133 enum btrace_error err;
7134
7135 err = linux_read_btrace (&btrace, tinfo, type);
7136 if (err != BTRACE_ERR_NONE)
7137 {
7138 if (err == BTRACE_ERR_OVERFLOW)
7139 buffer_grow_str0 (buffer, "E.Overflow.");
7140 else
7141 buffer_grow_str0 (buffer, "E.Generic Error.");
7142
7143 return -1;
7144 }
7145
7146 switch (btrace.format)
7147 {
7148 case BTRACE_FORMAT_NONE:
7149 buffer_grow_str0 (buffer, "E.No Trace.");
7150 return -1;
7151
7152 case BTRACE_FORMAT_BTS:
7153 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7154 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7155
7156 for (const btrace_block &block : *btrace.variant.bts.blocks)
7157 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7158 paddress (block.begin), paddress (block.end));
7159
7160 buffer_grow_str0 (buffer, "</btrace>\n");
7161 break;
7162
7163 case BTRACE_FORMAT_PT:
7164 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7165 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7166 buffer_grow_str (buffer, "<pt>\n");
7167
7168 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7169
7170 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7171 btrace.variant.pt.size);
7172
7173 buffer_grow_str (buffer, "</pt>\n");
7174 buffer_grow_str0 (buffer, "</btrace>\n");
7175 break;
7176
7177 default:
7178 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7179 return -1;
7180 }
7181
7182 return 0;
7183 }
7184
7185 /* See to_btrace_conf target method. */
7186
7187 int
7188 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7189 buffer *buffer)
7190 {
7191 const struct btrace_config *conf;
7192
7193 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7194 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7195
7196 conf = linux_btrace_conf (tinfo);
7197 if (conf != NULL)
7198 {
7199 switch (conf->format)
7200 {
7201 case BTRACE_FORMAT_NONE:
7202 break;
7203
7204 case BTRACE_FORMAT_BTS:
7205 buffer_xml_printf (buffer, "<bts");
7206 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7207 buffer_xml_printf (buffer, " />\n");
7208 break;
7209
7210 case BTRACE_FORMAT_PT:
7211 buffer_xml_printf (buffer, "<pt");
7212 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7213 buffer_xml_printf (buffer, "/>\n");
7214 break;
7215 }
7216 }
7217
7218 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7219 return 0;
7220 }
7221 #endif /* HAVE_LINUX_BTRACE */
7222
7223 /* See nat/linux-nat.h. */
7224
7225 ptid_t
7226 current_lwp_ptid (void)
7227 {
7228 return ptid_of (current_thread);
7229 }
7230
7231 const char *
7232 linux_process_target::thread_name (ptid_t thread)
7233 {
7234 return linux_proc_tid_get_name (thread);
7235 }
7236
7237 #if USE_THREAD_DB
7238 bool
7239 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7240 int *handle_len)
7241 {
7242 return thread_db_thread_handle (ptid, handle, handle_len);
7243 }
7244 #endif
7245
7246 /* Default implementation of linux_target_ops method "set_pc" for
7247 32-bit pc register which is literally named "pc". */
7248
7249 void
7250 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7251 {
7252 uint32_t newpc = pc;
7253
7254 supply_register_by_name (regcache, "pc", &newpc);
7255 }
7256
7257 /* Default implementation of linux_target_ops method "get_pc" for
7258 32-bit pc register which is literally named "pc". */
7259
7260 CORE_ADDR
7261 linux_get_pc_32bit (struct regcache *regcache)
7262 {
7263 uint32_t pc;
7264
7265 collect_register_by_name (regcache, "pc", &pc);
7266 if (debug_threads)
7267 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7268 return pc;
7269 }
7270
7271 /* Default implementation of linux_target_ops method "set_pc" for
7272 64-bit pc register which is literally named "pc". */
7273
7274 void
7275 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7276 {
7277 uint64_t newpc = pc;
7278
7279 supply_register_by_name (regcache, "pc", &newpc);
7280 }
7281
7282 /* Default implementation of linux_target_ops method "get_pc" for
7283 64-bit pc register which is literally named "pc". */
7284
7285 CORE_ADDR
7286 linux_get_pc_64bit (struct regcache *regcache)
7287 {
7288 uint64_t pc;
7289
7290 collect_register_by_name (regcache, "pc", &pc);
7291 if (debug_threads)
7292 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7293 return pc;
7294 }
7295
7296 /* See linux-low.h. */
7297
7298 int
7299 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7300 {
7301 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7302 int offset = 0;
7303
7304 gdb_assert (wordsize == 4 || wordsize == 8);
7305
7306 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7307 {
7308 if (wordsize == 4)
7309 {
7310 uint32_t *data_p = (uint32_t *) data;
7311 if (data_p[0] == match)
7312 {
7313 *valp = data_p[1];
7314 return 1;
7315 }
7316 }
7317 else
7318 {
7319 uint64_t *data_p = (uint64_t *) data;
7320 if (data_p[0] == match)
7321 {
7322 *valp = data_p[1];
7323 return 1;
7324 }
7325 }
7326
7327 offset += 2 * wordsize;
7328 }
7329
7330 return 0;
7331 }
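
/* Usage sketch (AT_ENTRY is a standard auxv tag; the variable name is
   hypothetical):

     CORE_ADDR entry = 0;
     if (linux_get_auxv (8, AT_ENTRY, &entry))
       ... use the program's entry point ...  */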
7332
7333 /* See linux-low.h. */
7334
7335 CORE_ADDR
7336 linux_get_hwcap (int wordsize)
7337 {
7338 CORE_ADDR hwcap = 0;
7339 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7340 return hwcap;
7341 }
7342
7343 /* See linux-low.h. */
7344
7345 CORE_ADDR
7346 linux_get_hwcap2 (int wordsize)
7347 {
7348 CORE_ADDR hwcap2 = 0;
7349 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7350 return hwcap2;
7351 }
7352
7353 #ifdef HAVE_LINUX_REGSETS
7354 void
7355 initialize_regsets_info (struct regsets_info *info)
7356 {
7357 for (info->num_regsets = 0;
7358 info->regsets[info->num_regsets].size >= 0;
7359 info->num_regsets++)
7360 ;
7361 }
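
/* The loop above relies on each arch's regsets table ending in a
   sentinel entry with a negative size (the per-arch files typically
   use a NULL_REGSET-style terminator for this).  */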
7362 #endif
7363
7364 void
7365 initialize_low (void)
7366 {
7367 struct sigaction sigchld_action;
7368
7369 memset (&sigchld_action, 0, sizeof (sigchld_action));
7370 set_target_ops (the_linux_target);
7371
7372 linux_ptrace_init_warnings ();
7373 linux_proc_init_warnings ();
7374
7375 sigchld_action.sa_handler = sigchld_handler;
7376 sigemptyset (&sigchld_action.sa_mask);
7377 sigchld_action.sa_flags = SA_RESTART;
7378 sigaction (SIGCHLD, &sigchld_action, NULL);
7379
7380 initialize_low_arch ();
7381
7382 linux_check_ptrace_features ();
7383 }