1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 #ifndef AT_HWCAP2
75 #define AT_HWCAP2 26
76 #endif
77
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN has defined these since at least the 2.6.32 kernels. */
90 #elif defined(BFIN)
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
99 #endif
100 #endif
101
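/* On uClibc/no-MMU targets, the inferior's text and data segments may
   be relocated at load time.  When all three PT_*_ADDR requests are
   available we can read the relocated addresses back out of the
   inferior with ptrace, which is what lets gdbserver answer GDB's
   qOffsets query.  */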
102 #if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107 #define SUPPORTS_READ_OFFSETS
108 #endif
109
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
113 #endif
114
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
117 typedef struct
118 {
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We used to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127 } Elf32_auxv_t;
128 #endif
129
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
132 typedef struct
133 {
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We used to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142 } Elf64_auxv_t;
143 #endif
144
145 /* Does the current host support PTRACE_GETREGSET? */
146 int have_ptrace_getregset = -1;
147
148 /* LWP accessors. */
149
150 /* See nat/linux-nat.h. */
151
152 ptid_t
153 ptid_of_lwp (struct lwp_info *lwp)
154 {
155 return ptid_of (get_lwp_thread (lwp));
156 }
157
158 /* See nat/linux-nat.h. */
159
160 void
161 lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163 {
164 lwp->arch_private = info;
165 }
166
167 /* See nat/linux-nat.h. */
168
169 struct arch_lwp_info *
170 lwp_arch_private_info (struct lwp_info *lwp)
171 {
172 return lwp->arch_private;
173 }
174
175 /* See nat/linux-nat.h. */
176
177 int
178 lwp_is_stopped (struct lwp_info *lwp)
179 {
180 return lwp->stopped;
181 }
182
183 /* See nat/linux-nat.h. */
184
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info *lwp)
187 {
188 return lwp->stop_reason;
189 }
190
191 /* See nat/linux-nat.h. */
192
193 int
194 lwp_is_stepping (struct lwp_info *lwp)
195 {
196 return lwp->stepping;
197 }
198
199 /* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
202
203 struct simple_pid_list
204 {
205 /* The process ID. */
206 int pid;
207
208 /* The status as reported by waitpid. */
209 int status;
210
211 /* Next in chain. */
212 struct simple_pid_list *next;
213 };
214 struct simple_pid_list *stopped_pids;
215
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219 static void
220 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221 {
222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228 }
229
230 static int
231 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232 {
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246 }
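
/* For illustration (hypothetical values): the wait-status filtering
   code further down stashes a stop from a not-yet-known pid with

     add_to_pid_list (&stopped_pids, new_pid, wstat);

   and handle_extended_wait below claims it once the corresponding
   fork/vfork/clone event arrives:

     if (!pull_pid_from_list (&stopped_pids, new_pid, &wstat))
       ... otherwise wait for the child's initial stop ...  */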
247
248 enum stopping_threads_kind
249 {
250 /* Not stopping threads presently. */
251 NOT_STOPPING_THREADS,
252
253 /* Stopping threads. */
254 STOPPING_THREADS,
255
256 /* Stopping and suspending threads. */
257 STOPPING_AND_SUSPENDING_THREADS
258 };
259
260 /* This is set while stop_all_lwps is in effect. */
261 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
262
263 /* FIXME make into a target method? */
264 int using_threads = 1;
265
266 /* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268 static int stabilizing_threads;
269
270 static void unsuspend_all_lwps (struct lwp_info *except);
271 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
272 static int lwp_is_marked_dead (struct lwp_info *lwp);
273 static int finish_step_over (struct lwp_info *lwp);
274 static int kill_lwp (unsigned long lwpid, int signo);
275 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
276 static int linux_low_ptrace_options (int attached);
277 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
278
279 /* When the event-loop is doing a step-over, this points at the thread
280 being stepped. */
281 ptid_t step_over_bkpt;
282
283 /* True if the low target can hardware single-step. */
284
285 static int
286 can_hardware_single_step (void)
287 {
288 if (the_low_target.supports_hardware_single_step != NULL)
289 return the_low_target.supports_hardware_single_step ();
290 else
291 return 0;
292 }
293
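/* Default implementations of the low target's breakpoint-related
   methods below; a backend that supports breakpoints overrides them
   (the PC accessors here either return 0 or assert).  */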
294 bool
295 linux_process_target::low_supports_breakpoints ()
296 {
297 return false;
298 }
299
300 CORE_ADDR
301 linux_process_target::low_get_pc (regcache *regcache)
302 {
303 return 0;
304 }
305
306 void
307 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
308 {
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
310 }
311
312 std::vector<CORE_ADDR>
313 linux_process_target::low_get_next_pcs (regcache *regcache)
314 {
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
317 }
318
319 int
320 linux_process_target::low_decr_pc_after_break ()
321 {
322 return 0;
323 }
324
325 /* Returns true if this target can support fast tracepoints. This
326 does not mean that the in-process agent has been loaded in the
327 inferior. */
328
329 static int
330 supports_fast_tracepoints (void)
331 {
332 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
333 }
334
335 /* True if LWP is stopped in its stepping range. */
336
337 static int
338 lwp_in_step_range (struct lwp_info *lwp)
339 {
340 CORE_ADDR pc = lwp->stop_pc;
341
342 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
343 }
344
345 struct pending_signals
346 {
347 int signal;
348 siginfo_t info;
349 struct pending_signals *prev;
350 };
351
352 /* The read/write ends of the pipe registered as waitable file in the
353 event loop. */
354 static int linux_event_pipe[2] = { -1, -1 };
355
356 /* True if we're currently in async mode. */
357 #define target_is_async_p() (linux_event_pipe[0] != -1)
358
359 static void send_sigstop (struct lwp_info *lwp);
360
361 /* Return 1 if HEADER is a 64-bit ELF header, 0 if 32-bit, -1 if not ELF. */
362
363 static int
364 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
365 {
366 if (header->e_ident[EI_MAG0] == ELFMAG0
367 && header->e_ident[EI_MAG1] == ELFMAG1
368 && header->e_ident[EI_MAG2] == ELFMAG2
369 && header->e_ident[EI_MAG3] == ELFMAG3)
370 {
371 *machine = header->e_machine;
372 return header->e_ident[EI_CLASS] == ELFCLASS64;
373
374 }
375 *machine = EM_NONE;
376 return -1;
377 }
378
379 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
380 file (or too short to tell), and -1 if the file cannot be opened
381 or is not an ELF file at all. */
382
383 static int
384 elf_64_file_p (const char *file, unsigned int *machine)
385 {
386 Elf64_Ehdr header;
387 int fd;
388
389 fd = open (file, O_RDONLY);
390 if (fd < 0)
391 return -1;
392
393 if (read (fd, &header, sizeof (header)) != sizeof (header))
394 {
395 close (fd);
396 return 0;
397 }
398 close (fd);
399
400 return elf_64_header_p (&header, machine);
401 }
402
403 /* Accepts an integer PID; returns true if the executable that PID
404 is running is a 64-bit ELF file. */
405
406 int
407 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
408 {
409 char file[PATH_MAX];
410
411 sprintf (file, "/proc/%d/exe", pid);
412 return elf_64_file_p (file, machine);
413 }
414
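/* Remove LWP's thread from the thread list, let the backend free any
   arch-specific data, and free the lwp_info itself.  */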
415 void
416 linux_process_target::delete_lwp (lwp_info *lwp)
417 {
418 struct thread_info *thr = get_lwp_thread (lwp);
419
420 if (debug_threads)
421 debug_printf ("deleting %ld\n", lwpid_of (thr));
422
423 remove_thread (thr);
424
425 low_delete_thread (lwp->arch_private);
426
427 free (lwp);
428 }
429
430 void
431 linux_process_target::low_delete_thread (arch_lwp_info *info)
432 {
433 /* Default implementation should be overridden if architecture-specific
434 info is being used. */
435 gdb_assert (info == nullptr);
436 }
437
438 process_info *
439 linux_process_target::add_linux_process (int pid, int attached)
440 {
441 struct process_info *proc;
442
443 proc = add_process (pid, attached);
444 proc->priv = XCNEW (struct process_info_private);
445
446 proc->priv->arch_private = low_new_process ();
447
448 return proc;
449 }
450
451 arch_process_info *
452 linux_process_target::low_new_process ()
453 {
454 return nullptr;
455 }
456
457 void
458 linux_process_target::low_delete_process (arch_process_info *info)
459 {
460 /* Default implementation must be overridden if architecture-specific
461 info exists. */
462 gdb_assert (info == nullptr);
463 }
464
465 void
466 linux_process_target::low_new_fork (process_info *parent, process_info *child)
467 {
468 /* Nop. */
469 }
470
471 void
472 linux_process_target::arch_setup_thread (thread_info *thread)
473 {
474 struct thread_info *saved_thread;
475
476 saved_thread = current_thread;
477 current_thread = thread;
478
479 low_arch_setup ();
480
481 current_thread = saved_thread;
482 }
483
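/* Handle a ptrace extended event (fork/vfork/clone/exec/vfork-done)
   seen in *ORIG_EVENT_LWP's wait status WSTAT.  Return 0 if the event
   should be reported to GDB, or 1 if it was handled internally and
   should be suppressed (e.g. a new clone GDB need not hear about).
   On exec, *ORIG_EVENT_LWP is updated to the new LWP.  */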
484 int
485 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
486 int wstat)
487 {
488 client_state &cs = get_client_state ();
489 struct lwp_info *event_lwp = *orig_event_lwp;
490 int event = linux_ptrace_get_extended_event (wstat);
491 struct thread_info *event_thr = get_lwp_thread (event_lwp);
492 struct lwp_info *new_lwp;
493
494 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
495
496 /* All extended events we currently use are mid-syscall. Only
497 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
498 you have to be using PTRACE_SEIZE to get that. */
499 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
500
501 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
502 || (event == PTRACE_EVENT_CLONE))
503 {
504 ptid_t ptid;
505 unsigned long new_pid;
506 int ret, status;
507
508 /* Get the pid of the new lwp. */
509 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
510 &new_pid);
511
512 /* If we haven't already seen the new PID stop, wait for it now. */
513 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
514 {
515 /* The new child has a pending SIGSTOP. We can't affect it until it
516 hits the SIGSTOP, but we're already attached. */
517
518 ret = my_waitpid (new_pid, &status, __WALL);
519
520 if (ret == -1)
521 perror_with_name ("waiting for new child");
522 else if (ret != new_pid)
523 warning ("wait returned unexpected PID %d", ret);
524 else if (!WIFSTOPPED (status))
525 warning ("wait returned unexpected status 0x%x", status);
526 }
527
528 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
529 {
530 struct process_info *parent_proc;
531 struct process_info *child_proc;
532 struct lwp_info *child_lwp;
533 struct thread_info *child_thr;
534 struct target_desc *tdesc;
535
536 ptid = ptid_t (new_pid, new_pid, 0);
537
538 if (debug_threads)
539 {
540 debug_printf ("HEW: Got fork event from LWP %ld, "
541 "new child is %d\n",
542 ptid_of (event_thr).lwp (),
543 ptid.pid ());
544 }
545
546 /* Add the new process to the tables and clone the breakpoint
547 lists of the parent. We need to do this even if the new process
548 will be detached, since we will need the process object and the
549 breakpoints to remove any breakpoints from memory when we
550 detach, and the client side will access registers. */
551 child_proc = add_linux_process (new_pid, 0);
552 gdb_assert (child_proc != NULL);
553 child_lwp = add_lwp (ptid);
554 gdb_assert (child_lwp != NULL);
555 child_lwp->stopped = 1;
556 child_lwp->must_set_ptrace_flags = 1;
557 child_lwp->status_pending_p = 0;
558 child_thr = get_lwp_thread (child_lwp);
559 child_thr->last_resume_kind = resume_stop;
560 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
561
562 /* If we're suspending all threads, leave this one suspended
563 too. If the fork/clone parent is stepping over a breakpoint,
564 all other threads have been suspended already. Leave the
565 child suspended too. */
566 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
567 || event_lwp->bp_reinsert != 0)
568 {
569 if (debug_threads)
570 debug_printf ("HEW: leaving child suspended\n");
571 child_lwp->suspended = 1;
572 }
573
574 parent_proc = get_thread_process (event_thr);
575 child_proc->attached = parent_proc->attached;
576
577 if (event_lwp->bp_reinsert != 0
578 && supports_software_single_step ()
579 && event == PTRACE_EVENT_VFORK)
580 {
581 /* If we leave single-step breakpoints there, child will
582 hit it, so uninsert single-step breakpoints from parent
583 (and child). Once vfork child is done, reinsert
584 them back to parent. */
585 uninsert_single_step_breakpoints (event_thr);
586 }
587
588 clone_all_breakpoints (child_thr, event_thr);
589
590 tdesc = allocate_target_description ();
591 copy_target_description (tdesc, parent_proc->tdesc);
592 child_proc->tdesc = tdesc;
593
594 /* Clone arch-specific process data. */
595 low_new_fork (parent_proc, child_proc);
596
597 /* Save fork info in the parent thread. */
598 if (event == PTRACE_EVENT_FORK)
599 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
600 else if (event == PTRACE_EVENT_VFORK)
601 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
602
603 event_lwp->waitstatus.value.related_pid = ptid;
604
605 /* The status_pending field contains bits denoting the
606 extended event, so when the pending event is handled,
607 the handler will look at lwp->waitstatus. */
608 event_lwp->status_pending_p = 1;
609 event_lwp->status_pending = wstat;
610
611 /* Link the threads until the parent event is passed on to
612 higher layers. */
613 event_lwp->fork_relative = child_lwp;
614 child_lwp->fork_relative = event_lwp;
615
616 /* If the parent thread is doing step-over with single-step
617 breakpoints, the list of single-step breakpoints are cloned
618 from the parent's. Remove them from the child process.
619 In case of vfork, we'll reinsert them back once vforked
620 child is done. */
621 if (event_lwp->bp_reinsert != 0
622 && supports_software_single_step ())
623 {
624 /* The child process is forked and stopped, so it is safe
625 to access its memory without stopping all other threads
626 from other processes. */
627 delete_single_step_breakpoints (child_thr);
628
629 gdb_assert (has_single_step_breakpoints (event_thr));
630 gdb_assert (!has_single_step_breakpoints (child_thr));
631 }
632
633 /* Report the event. */
634 return 0;
635 }
636
637 if (debug_threads)
638 debug_printf ("HEW: Got clone event "
639 "from LWP %ld, new child is LWP %ld\n",
640 lwpid_of (event_thr), new_pid);
641
642 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
643 new_lwp = add_lwp (ptid);
644
645 /* Either we're going to immediately resume the new thread
646 or leave it stopped. resume_one_lwp is a nop if it
647 thinks the thread is currently running, so set this first
648 before calling resume_one_lwp. */
649 new_lwp->stopped = 1;
650
651 /* If we're suspending all threads, leave this one suspended
652 too. If the fork/clone parent is stepping over a breakpoint,
653 all other threads have been suspended already. Leave the
654 child suspended too. */
655 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
656 || event_lwp->bp_reinsert != 0)
657 new_lwp->suspended = 1;
658
659 /* Normally we will get the pending SIGSTOP. But in some cases
660 we might get another signal delivered to the group first.
661 If we do get another signal, be sure not to lose it. */
662 if (WSTOPSIG (status) != SIGSTOP)
663 {
664 new_lwp->stop_expected = 1;
665 new_lwp->status_pending_p = 1;
666 new_lwp->status_pending = status;
667 }
668 else if (cs.report_thread_events)
669 {
670 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
671 new_lwp->status_pending_p = 1;
672 new_lwp->status_pending = status;
673 }
674
675 #ifdef USE_THREAD_DB
676 thread_db_notice_clone (event_thr, ptid);
677 #endif
678
679 /* Don't report the event. */
680 return 1;
681 }
682 else if (event == PTRACE_EVENT_VFORK_DONE)
683 {
684 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
685
686 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
687 {
688 reinsert_single_step_breakpoints (event_thr);
689
690 gdb_assert (has_single_step_breakpoints (event_thr));
691 }
692
693 /* Report the event. */
694 return 0;
695 }
696 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
697 {
698 struct process_info *proc;
699 std::vector<int> syscalls_to_catch;
700 ptid_t event_ptid;
701 pid_t event_pid;
702
703 if (debug_threads)
704 {
705 debug_printf ("HEW: Got exec event from LWP %ld\n",
706 lwpid_of (event_thr));
707 }
708
709 /* Get the event ptid. */
710 event_ptid = ptid_of (event_thr);
711 event_pid = event_ptid.pid ();
712
713 /* Save the syscall list from the execing process. */
714 proc = get_thread_process (event_thr);
715 syscalls_to_catch = std::move (proc->syscalls_to_catch);
716
717 /* Delete the execing process and all its threads. */
718 mourn (proc);
719 current_thread = NULL;
720
721 /* Create a new process/lwp/thread. */
722 proc = add_linux_process (event_pid, 0);
723 event_lwp = add_lwp (event_ptid);
724 event_thr = get_lwp_thread (event_lwp);
725 gdb_assert (current_thread == event_thr);
726 arch_setup_thread (event_thr);
727
728 /* Set the event status. */
729 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
730 event_lwp->waitstatus.value.execd_pathname
731 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
732
733 /* Mark the exec status as pending. */
734 event_lwp->stopped = 1;
735 event_lwp->status_pending_p = 1;
736 event_lwp->status_pending = wstat;
737 event_thr->last_resume_kind = resume_continue;
738 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
739
740 /* Update syscall state in the new lwp, effectively mid-syscall too. */
741 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
742
743 /* Restore the list to catch. Don't rely on the client, which is free
744 to avoid sending a new list when the architecture doesn't change.
745 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
746 proc->syscalls_to_catch = std::move (syscalls_to_catch);
747
748 /* Report the event. */
749 *orig_event_lwp = event_lwp;
750 return 0;
751 }
752
753 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
754 }
755
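/* Fetch LWP's current PC from its register cache, temporarily making
   its thread the current thread.  Returns 0 if the low target does
   not support breakpoints.  */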
756 CORE_ADDR
757 linux_process_target::get_pc (lwp_info *lwp)
758 {
759 struct thread_info *saved_thread;
760 struct regcache *regcache;
761 CORE_ADDR pc;
762
763 if (!low_supports_breakpoints ())
764 return 0;
765
766 saved_thread = current_thread;
767 current_thread = get_lwp_thread (lwp);
768
769 regcache = get_thread_regcache (current_thread, 1);
770 pc = low_get_pc (regcache);
771
772 if (debug_threads)
773 debug_printf ("pc is 0x%lx\n", (long) pc);
774
775 current_thread = saved_thread;
776 return pc;
777 }
778
779 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
780 Fill *SYSNO with the syscall nr trapped. */
781
782 static void
783 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
784 {
785 struct thread_info *saved_thread;
786 struct regcache *regcache;
787
788 if (the_low_target.get_syscall_trapinfo == NULL)
789 {
790 /* If we cannot get the syscall trapinfo, report an unknown
791 system call number. */
792 *sysno = UNKNOWN_SYSCALL;
793 return;
794 }
795
796 saved_thread = current_thread;
797 current_thread = get_lwp_thread (lwp);
798
799 regcache = get_thread_regcache (current_thread, 1);
800 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
801
802 if (debug_threads)
803 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
804
805 current_thread = saved_thread;
806 }
807
808 bool
809 linux_process_target::save_stop_reason (lwp_info *lwp)
810 {
811 CORE_ADDR pc;
812 CORE_ADDR sw_breakpoint_pc;
813 struct thread_info *saved_thread;
814 #if USE_SIGTRAP_SIGINFO
815 siginfo_t siginfo;
816 #endif
817
818 if (!low_supports_breakpoints ())
819 return false;
820
821 pc = get_pc (lwp);
822 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
823
824 /* breakpoint_at reads from the current thread. */
825 saved_thread = current_thread;
826 current_thread = get_lwp_thread (lwp);
827
828 #if USE_SIGTRAP_SIGINFO
829 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
830 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
831 {
832 if (siginfo.si_signo == SIGTRAP)
833 {
834 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
835 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
836 {
837 /* The si_code is ambiguous on this arch -- check debug
838 registers. */
839 if (!check_stopped_by_watchpoint (lwp))
840 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
841 }
842 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
843 {
844 /* If we determine the LWP stopped for a SW breakpoint,
845 trust it. Particularly don't check watchpoint
846 registers, because at least on s390, we'd find
847 stopped-by-watchpoint as long as there's a watchpoint
848 set. */
849 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
850 }
851 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
852 {
853 /* This can indicate either a hardware breakpoint or
854 hardware watchpoint. Check debug registers. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
857 }
858 else if (siginfo.si_code == TRAP_TRACE)
859 {
860 /* We may have single stepped an instruction that
861 triggered a watchpoint. In that case, on some
862 architectures (such as x86), instead of TRAP_HWBKPT,
863 si_code indicates TRAP_TRACE, and we need to check
864 the debug registers separately. */
865 if (!check_stopped_by_watchpoint (lwp))
866 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
867 }
868 }
869 }
870 #else
871 /* We may have just stepped a breakpoint instruction. E.g., in
872 non-stop mode, GDB first tells the thread A to step a range, and
873 then the user inserts a breakpoint inside the range. In that
874 case we need to report the breakpoint PC. */
875 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
876 && low_breakpoint_at (sw_breakpoint_pc))
877 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
878
879 if (hardware_breakpoint_inserted_here (pc))
880 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
881
882 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
883 check_stopped_by_watchpoint (lwp);
884 #endif
885
886 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
887 {
888 if (debug_threads)
889 {
890 struct thread_info *thr = get_lwp_thread (lwp);
891
892 debug_printf ("CSBB: %s stopped by software breakpoint\n",
893 target_pid_to_str (ptid_of (thr)));
894 }
895
896 /* Back up the PC if necessary. */
897 if (pc != sw_breakpoint_pc)
898 {
899 struct regcache *regcache
900 = get_thread_regcache (current_thread, 1);
901 low_set_pc (regcache, sw_breakpoint_pc);
902 }
903
904 /* Update this so we record the correct stop PC below. */
905 pc = sw_breakpoint_pc;
906 }
907 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
908 {
909 if (debug_threads)
910 {
911 struct thread_info *thr = get_lwp_thread (lwp);
912
913 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
914 target_pid_to_str (ptid_of (thr)));
915 }
916 }
917 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
918 {
919 if (debug_threads)
920 {
921 struct thread_info *thr = get_lwp_thread (lwp);
922
923 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
924 target_pid_to_str (ptid_of (thr)));
925 }
926 }
927 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
928 {
929 if (debug_threads)
930 {
931 struct thread_info *thr = get_lwp_thread (lwp);
932
933 debug_printf ("CSBB: %s stopped by trace\n",
934 target_pid_to_str (ptid_of (thr)));
935 }
936 }
937
938 lwp->stop_pc = pc;
939 current_thread = saved_thread;
940 return true;
941 }
942
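/* Allocate and initialize a new lwp_info for PTID, add a thread for
   it to the thread list, and give the backend a chance to attach
   arch-specific data via low_new_thread.  */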
943 lwp_info *
944 linux_process_target::add_lwp (ptid_t ptid)
945 {
946 struct lwp_info *lwp;
947
948 lwp = XCNEW (struct lwp_info);
949
950 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
951
952 lwp->thread = add_thread (ptid, lwp);
953
954 low_new_thread (lwp);
955
956 return lwp;
957 }
958
959 void
960 linux_process_target::low_new_thread (lwp_info *info)
961 {
962 /* Nop. */
963 }
964
965 /* Callback to be used when calling fork_inferior, responsible for
966 actually initiating the tracing of the inferior. */
967
968 static void
969 linux_ptrace_fun ()
970 {
971 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
972 (PTRACE_TYPE_ARG4) 0) < 0)
973 trace_start_error_with_name ("ptrace");
974
975 if (setpgid (0, 0) < 0)
976 trace_start_error_with_name ("setpgid");
977
978 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
979 stdout to stderr so that inferior i/o doesn't corrupt the connection.
980 Also, redirect stdin to /dev/null. */
981 if (remote_connection_is_stdio ())
982 {
983 if (close (0) < 0)
984 trace_start_error_with_name ("close");
985 if (open ("/dev/null", O_RDONLY) < 0)
986 trace_start_error_with_name ("open");
987 if (dup2 (2, 1) < 0)
988 trace_start_error_with_name ("dup2");
989 if (write (2, "stdin/stdout redirected\n",
990 sizeof ("stdin/stdout redirected\n") - 1) < 0)
991 {
992 /* Errors ignored. */;
993 }
994 }
995 }
996
997 /* Start an inferior process and return its pid.
998 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
999 are its arguments. */
1000
1001 int
1002 linux_process_target::create_inferior (const char *program,
1003 const std::vector<char *> &program_args)
1004 {
1005 client_state &cs = get_client_state ();
1006 struct lwp_info *new_lwp;
1007 int pid;
1008 ptid_t ptid;
1009
1010 {
1011 maybe_disable_address_space_randomization restore_personality
1012 (cs.disable_randomization);
1013 std::string str_program_args = stringify_argv (program_args);
1014
1015 pid = fork_inferior (program,
1016 str_program_args.c_str (),
1017 get_environ ()->envp (), linux_ptrace_fun,
1018 NULL, NULL, NULL, NULL);
1019 }
1020
1021 add_linux_process (pid, 0);
1022
1023 ptid = ptid_t (pid, pid, 0);
1024 new_lwp = add_lwp (ptid);
1025 new_lwp->must_set_ptrace_flags = 1;
1026
1027 post_fork_inferior (pid, program);
1028
1029 return pid;
1030 }
1031
1032 /* Implement the post_create_inferior target_ops method. */
1033
1034 void
1035 linux_process_target::post_create_inferior ()
1036 {
1037 struct lwp_info *lwp = get_thread_lwp (current_thread);
1038
1039 low_arch_setup ();
1040
1041 if (lwp->must_set_ptrace_flags)
1042 {
1043 struct process_info *proc = current_process ();
1044 int options = linux_low_ptrace_options (proc->attached);
1045
1046 linux_enable_event_reporting (lwpid_of (current_thread), options);
1047 lwp->must_set_ptrace_flags = 0;
1048 }
1049 }
1050
1051 int
1052 linux_process_target::attach_lwp (ptid_t ptid)
1053 {
1054 struct lwp_info *new_lwp;
1055 int lwpid = ptid.lwp ();
1056
1057 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1058 != 0)
1059 return errno;
1060
1061 new_lwp = add_lwp (ptid);
1062
1063 /* We need to wait for SIGSTOP before being able to make the next
1064 ptrace call on this LWP. */
1065 new_lwp->must_set_ptrace_flags = 1;
1066
1067 if (linux_proc_pid_is_stopped (lwpid))
1068 {
1069 if (debug_threads)
1070 debug_printf ("Attached to a stopped process\n");
1071
1072 /* The process is definitely stopped. It is in a job control
1073 stop, unless the kernel predates the TASK_STOPPED /
1074 TASK_TRACED distinction, in which case it might be in a
1075 ptrace stop. Make sure it is in a ptrace stop; from there we
1076 can kill it, signal it, et cetera.
1077
1078 First make sure there is a pending SIGSTOP. Since we are
1079 already attached, the process cannot transition from stopped
1080 to running without a PTRACE_CONT; so we know this signal will
1081 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1082 probably already in the queue (unless this kernel is old
1083 enough to use TASK_STOPPED for ptrace stops); but since
1084 SIGSTOP is not an RT signal, it can only be queued once. */
1085 kill_lwp (lwpid, SIGSTOP);
1086
1087 /* Finally, resume the stopped process. This will deliver the
1088 SIGSTOP (or a higher priority signal, just like normal
1089 PTRACE_ATTACH), which we'll catch later on. */
1090 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1091 }
1092
1093 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1094 brings it to a halt.
1095
1096 There are several cases to consider here:
1097
1098 1) gdbserver has already attached to the process and is being notified
1099 of a new thread that is being created.
1100 In this case we should ignore that SIGSTOP and resume the
1101 process. This is handled below by setting stop_expected = 1,
1102 and the fact that add_thread sets last_resume_kind ==
1103 resume_continue.
1104
1105 2) This is the first thread (the process thread), and we're attaching
1106 to it via attach_inferior.
1107 In this case we want the process thread to stop.
1108 This is handled by having linux_attach set last_resume_kind ==
1109 resume_stop after we return.
1110
1111 If the pid we are attaching to is also the tgid, we attach to and
1112 stop all the existing threads. Otherwise, we attach to pid and
1113 ignore any other threads in the same group as this pid.
1114
1115 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1116 existing threads.
1117 In this case we want the thread to stop.
1118 FIXME: This case is currently not properly handled.
1119 We should wait for the SIGSTOP but don't. Things apparently work
1120 because enough time passes between when we ptrace (ATTACH) and when
1121 gdb makes the next ptrace call on the thread.
1122
1123 On the other hand, if we are currently trying to stop all threads, we
1124 should treat the new thread as if we had sent it a SIGSTOP. This works
1125 because we are guaranteed that the add_lwp call above added us to the
1126 end of the list, and so the new thread has not yet reached
1127 wait_for_sigstop (but will). */
1128 new_lwp->stop_expected = 1;
1129
1130 return 0;
1131 }
1132
1133 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1134 already attached. Returns true if a new LWP is found, false
1135 otherwise. */
1136
1137 static int
1138 attach_proc_task_lwp_callback (ptid_t ptid)
1139 {
1140 /* Is this a new thread? */
1141 if (find_thread_ptid (ptid) == NULL)
1142 {
1143 int lwpid = ptid.lwp ();
1144 int err;
1145
1146 if (debug_threads)
1147 debug_printf ("Found new lwp %d\n", lwpid);
1148
1149 err = the_linux_target->attach_lwp (ptid);
1150
1151 /* Be quiet if we simply raced with the thread exiting. EPERM
1152 is returned if the thread's task still exists, and is marked
1153 as exited or zombie, as well as other conditions, so in that
1154 case, confirm the status in /proc/PID/status. */
1155 if (err == ESRCH
1156 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1157 {
1158 if (debug_threads)
1159 {
1160 debug_printf ("Cannot attach to lwp %d: "
1161 "thread is gone (%d: %s)\n",
1162 lwpid, err, safe_strerror (err));
1163 }
1164 }
1165 else if (err != 0)
1166 {
1167 std::string reason
1168 = linux_ptrace_attach_fail_reason_string (ptid, err);
1169
1170 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1171 }
1172
1173 return 1;
1174 }
1175 return 0;
1176 }
1177
1178 static void async_file_mark (void);
1179
1180 /* Attach to PID. If PID is the tgid, attach to it and all
1181 of its threads. */
1182
1183 int
1184 linux_process_target::attach (unsigned long pid)
1185 {
1186 struct process_info *proc;
1187 struct thread_info *initial_thread;
1188 ptid_t ptid = ptid_t (pid, pid, 0);
1189 int err;
1190
1191 proc = add_linux_process (pid, 1);
1192
1193 /* Attach to PID. We will check for other threads
1194 soon. */
1195 err = attach_lwp (ptid);
1196 if (err != 0)
1197 {
1198 remove_process (proc);
1199
1200 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1201 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1202 }
1203
1204 /* Don't ignore the initial SIGSTOP if we just attached to this
1205 process. It will be collected by wait shortly. */
1206 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1207 initial_thread->last_resume_kind = resume_stop;
1208
1209 /* We must attach to every LWP. If /proc is mounted, use that to
1210 find them now. On the one hand, the inferior may be using raw
1211 clone instead of using pthreads. On the other hand, even if it
1212 is using pthreads, GDB may not be connected yet (thread_db needs
1213 to do symbol lookups, through qSymbol). Also, thread_db walks
1214 structures in the inferior's address space to find the list of
1215 threads/LWPs, and those structures may well be corrupted. Note
1216 that once thread_db is loaded, we'll still use it to list threads
1217 and associate pthread info with each LWP. */
1218 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1219
1220 /* GDB will shortly read the xml target description for this
1221 process, to figure out the process' architecture. But the target
1222 description is only filled in when the first process/thread in
1223 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1224 that now, otherwise, if GDB is fast enough, it could read the
1225 target description _before_ that initial stop. */
1226 if (non_stop)
1227 {
1228 struct lwp_info *lwp;
1229 int wstat, lwpid;
1230 ptid_t pid_ptid = ptid_t (pid);
1231
1232 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1233 gdb_assert (lwpid > 0);
1234
1235 lwp = find_lwp_pid (ptid_t (lwpid));
1236
1237 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1238 {
1239 lwp->status_pending_p = 1;
1240 lwp->status_pending = wstat;
1241 }
1242
1243 initial_thread->last_resume_kind = resume_continue;
1244
1245 async_file_mark ();
1246
1247 gdb_assert (proc->tdesc != NULL);
1248 }
1249
1250 return 0;
1251 }
1252
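/* Return true if the process PID has at most one thread left on our
   thread list.  */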
1253 static int
1254 last_thread_of_process_p (int pid)
1255 {
1256 bool seen_one = false;
1257
1258 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1259 {
1260 if (!seen_one)
1261 {
1262 /* This is the first thread of this process we see. */
1263 seen_one = true;
1264 return false;
1265 }
1266 else
1267 {
1268 /* This is the second thread of this process we see. */
1269 return true;
1270 }
1271 });
1272
1273 return thread == NULL;
1274 }
1275
1276 /* Kill LWP. */
1277
1278 static void
1279 linux_kill_one_lwp (struct lwp_info *lwp)
1280 {
1281 struct thread_info *thr = get_lwp_thread (lwp);
1282 int pid = lwpid_of (thr);
1283
1284 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1285 there is no signal context, and ptrace(PTRACE_KILL) (or
1286 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1287 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1288 alternative is to kill with SIGKILL. We only need one SIGKILL
1289 per process, not one for each thread. But since we still support
1290 debugging programs using raw clone without CLONE_THREAD,
1291 we send one for each thread. For years, we used PTRACE_KILL
1292 only, so we're being a bit paranoid about some old kernels where
1293 PTRACE_KILL might work better (dubious if there are any such, but
1294 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1295 second, and that way we're fine everywhere. */
1296
1297 errno = 0;
1298 kill_lwp (pid, SIGKILL);
1299 if (debug_threads)
1300 {
1301 int save_errno = errno;
1302
1303 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1304 target_pid_to_str (ptid_of (thr)),
1305 save_errno ? safe_strerror (save_errno) : "OK");
1306 }
1307
1308 errno = 0;
1309 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1310 if (debug_threads)
1311 {
1312 int save_errno = errno;
1313
1314 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1315 target_pid_to_str (ptid_of (thr)),
1316 save_errno ? safe_strerror (save_errno) : "OK");
1317 }
1318 }
1319
1320 /* Kill LWP and wait for it to die. */
1321
1322 static void
1323 kill_wait_lwp (struct lwp_info *lwp)
1324 {
1325 struct thread_info *thr = get_lwp_thread (lwp);
1326 int pid = ptid_of (thr).pid ();
1327 int lwpid = ptid_of (thr).lwp ();
1328 int wstat;
1329 int res;
1330
1331 if (debug_threads)
1332 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1333
1334 do
1335 {
1336 linux_kill_one_lwp (lwp);
1337
1338 /* Make sure it died. Notes:
1339
1340 - The loop is most likely unnecessary.
1341
1342 - We don't use wait_for_event as that could delete lwps
1343 while we're iterating over them. We're not interested in
1344 any pending status at this point, only in making sure all
1345 wait status on the kernel side are collected until the
1346 process is reaped.
1347
1348 - We don't use __WALL here as the __WALL emulation relies on
1349 SIGCHLD, and killing a stopped process doesn't generate
1350 one, nor an exit status.
1351 */
1352 res = my_waitpid (lwpid, &wstat, 0);
1353 if (res == -1 && errno == ECHILD)
1354 res = my_waitpid (lwpid, &wstat, __WCLONE);
1355 } while (res > 0 && WIFSTOPPED (wstat));
1356
1357 /* Even if it was stopped, the child may have already disappeared.
1358 E.g., if it was killed by SIGKILL. */
1359 if (res < 0 && errno != ECHILD)
1360 perror_with_name ("kill_wait_lwp");
1361 }
1362
1363 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1364 except the leader. */
1365
1366 static void
1367 kill_one_lwp_callback (thread_info *thread, int pid)
1368 {
1369 struct lwp_info *lwp = get_thread_lwp (thread);
1370
1371 /* We avoid killing the first thread here, because of a Linux kernel (at
1372 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1373 the children get a chance to be reaped, it will remain a zombie
1374 forever. */
1375
1376 if (lwpid_of (thread) == pid)
1377 {
1378 if (debug_threads)
1379 debug_printf ("lkop: is last of process %s\n",
1380 target_pid_to_str (thread->id));
1381 return;
1382 }
1383
1384 kill_wait_lwp (lwp);
1385 }
1386
1387 int
1388 linux_process_target::kill (process_info *process)
1389 {
1390 int pid = process->pid;
1391
1392 /* If we're killing a running inferior, make sure it is stopped
1393 first, as PTRACE_KILL will not work otherwise. */
1394 stop_all_lwps (0, NULL);
1395
1396 for_each_thread (pid, [&] (thread_info *thread)
1397 {
1398 kill_one_lwp_callback (thread, pid);
1399 });
1400
1401 /* See the comment in linux_kill_one_lwp. We did not kill the first
1402 thread in the list, so do so now. */
1403 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1404
1405 if (lwp == NULL)
1406 {
1407 if (debug_threads)
1408 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1409 pid);
1410 }
1411 else
1412 kill_wait_lwp (lwp);
1413
1414 mourn (process);
1415
1416 /* Since we presently can only stop all lwps of all processes, we
1417 need to unstop lwps of other processes. */
1418 unstop_all_lwps (0, NULL);
1419 return 0;
1420 }
1421
1422 /* Get pending signal of THREAD, for detaching purposes. This is the
1423 signal the thread last stopped for, which we need to deliver to the
1424 thread when detaching; otherwise it'd be suppressed/lost. */
1425
1426 static int
1427 get_detach_signal (struct thread_info *thread)
1428 {
1429 client_state &cs = get_client_state ();
1430 enum gdb_signal signo = GDB_SIGNAL_0;
1431 int status;
1432 struct lwp_info *lp = get_thread_lwp (thread);
1433
1434 if (lp->status_pending_p)
1435 status = lp->status_pending;
1436 else
1437 {
1438 /* If the thread had been suspended by gdbserver, and it stopped
1439 cleanly, then it'll have stopped with SIGSTOP. But we don't
1440 want to deliver that SIGSTOP. */
1441 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1442 || thread->last_status.value.sig == GDB_SIGNAL_0)
1443 return 0;
1444
1445 /* Otherwise, we may need to deliver the signal we
1446 intercepted. */
1447 status = lp->last_status;
1448 }
1449
1450 if (!WIFSTOPPED (status))
1451 {
1452 if (debug_threads)
1453 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1454 target_pid_to_str (ptid_of (thread)));
1455 return 0;
1456 }
1457
1458 /* Extended wait statuses aren't real SIGTRAPs. */
1459 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1460 {
1461 if (debug_threads)
1462 debug_printf ("GPS: lwp %s had stopped with extended "
1463 "status: no pending signal\n",
1464 target_pid_to_str (ptid_of (thread)));
1465 return 0;
1466 }
1467
1468 signo = gdb_signal_from_host (WSTOPSIG (status));
1469
1470 if (cs.program_signals_p && !cs.program_signals[signo])
1471 {
1472 if (debug_threads)
1473 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1474 target_pid_to_str (ptid_of (thread)),
1475 gdb_signal_to_string (signo));
1476 return 0;
1477 }
1478 else if (!cs.program_signals_p
1479 /* If we have no way to know which signals GDB does not
1480 want to have passed to the program, assume
1481 SIGTRAP/SIGINT, which is GDB's default. */
1482 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1483 {
1484 if (debug_threads)
1485 debug_printf ("GPS: lwp %s had signal %s, "
1486 "but we don't know if we should pass it. "
1487 "Default to not.\n",
1488 target_pid_to_str (ptid_of (thread)),
1489 gdb_signal_to_string (signo));
1490 return 0;
1491 }
1492 else
1493 {
1494 if (debug_threads)
1495 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1496 target_pid_to_str (ptid_of (thread)),
1497 gdb_signal_to_string (signo));
1498
1499 return WSTOPSIG (status);
1500 }
1501 }
1502
1503 void
1504 linux_process_target::detach_one_lwp (lwp_info *lwp)
1505 {
1506 struct thread_info *thread = get_lwp_thread (lwp);
1507 int sig;
1508 int lwpid;
1509
1510 /* If there is a pending SIGSTOP, get rid of it. */
1511 if (lwp->stop_expected)
1512 {
1513 if (debug_threads)
1514 debug_printf ("Sending SIGCONT to %s\n",
1515 target_pid_to_str (ptid_of (thread)));
1516
1517 kill_lwp (lwpid_of (thread), SIGCONT);
1518 lwp->stop_expected = 0;
1519 }
1520
1521 /* Pass on any pending signal for this thread. */
1522 sig = get_detach_signal (thread);
1523
1524 /* Preparing to resume may try to write registers, and fail if the
1525 lwp is zombie. If that happens, ignore the error. We'll handle
1526 it below, when detach fails with ESRCH. */
1527 try
1528 {
1529 /* Flush any pending changes to the process's registers. */
1530 regcache_invalidate_thread (thread);
1531
1532 /* Finally, let it resume. */
1533 if (the_low_target.prepare_to_resume != NULL)
1534 the_low_target.prepare_to_resume (lwp);
1535 }
1536 catch (const gdb_exception_error &ex)
1537 {
1538 if (!check_ptrace_stopped_lwp_gone (lwp))
1539 throw;
1540 }
1541
1542 lwpid = lwpid_of (thread);
1543 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1544 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1545 {
1546 int save_errno = errno;
1547
1548 /* We know the thread exists, so ESRCH must mean the lwp is
1549 zombie. This can happen if one of the already-detached
1550 threads exits the whole thread group. In that case we're
1551 still attached, and must reap the lwp. */
1552 if (save_errno == ESRCH)
1553 {
1554 int ret, status;
1555
1556 ret = my_waitpid (lwpid, &status, __WALL);
1557 if (ret == -1)
1558 {
1559 warning (_("Couldn't reap LWP %d while detaching: %s"),
1560 lwpid, safe_strerror (errno));
1561 }
1562 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1563 {
1564 warning (_("Reaping LWP %d while detaching "
1565 "returned unexpected status 0x%x"),
1566 lwpid, status);
1567 }
1568 }
1569 else
1570 {
1571 error (_("Can't detach %s: %s"),
1572 target_pid_to_str (ptid_of (thread)),
1573 safe_strerror (save_errno));
1574 }
1575 }
1576 else if (debug_threads)
1577 {
1578 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1579 target_pid_to_str (ptid_of (thread)),
1580 strsignal (sig));
1581 }
1582
1583 delete_lwp (lwp);
1584 }
1585
1586 int
1587 linux_process_target::detach (process_info *process)
1588 {
1589 struct lwp_info *main_lwp;
1590
1591 /* If there's a step over already in progress, let it finish first;
1592 otherwise nesting a stabilize_threads operation on top gets real
1593 messy. */
1594 complete_ongoing_step_over ();
1595
1596 /* Stop all threads before detaching. First, ptrace requires that
1597 the thread is stopped to successfully detach. Second, thread_db
1598 may need to uninstall thread event breakpoints from memory, which
1599 only works with a stopped process anyway. */
1600 stop_all_lwps (0, NULL);
1601
1602 #ifdef USE_THREAD_DB
1603 thread_db_detach (process);
1604 #endif
1605
1606 /* Stabilize threads (move out of jump pads). */
1607 target_stabilize_threads ();
1608
1609 /* Detach from the clone lwps first. If the thread group exits just
1610 while we're detaching, we must reap the clone lwps before we're
1611 able to reap the leader. */
1612 for_each_thread (process->pid, [this] (thread_info *thread)
1613 {
1614 /* We don't actually detach from the thread group leader just yet.
1615 If the thread group exits, we must reap the zombie clone lwps
1616 before we're able to reap the leader. */
1617 if (thread->id.pid () == thread->id.lwp ())
1618 return;
1619
1620 lwp_info *lwp = get_thread_lwp (thread);
1621 detach_one_lwp (lwp);
1622 });
1623
1624 main_lwp = find_lwp_pid (ptid_t (process->pid));
1625 detach_one_lwp (main_lwp);
1626
1627 mourn (process);
1628
1629 /* Since we presently can only stop all lwps of all processes, we
1630 need to unstop lwps of other processes. */
1631 unstop_all_lwps (0, NULL);
1632 return 0;
1633 }
1634
1635 /* Mourn PROCESS: delete all its LWPs, free its private data, and remove it. */
1636
1637 void
1638 linux_process_target::mourn (process_info *process)
1639 {
1640 struct process_info_private *priv;
1641
1642 #ifdef USE_THREAD_DB
1643 thread_db_mourn (process);
1644 #endif
1645
1646 for_each_thread (process->pid, [this] (thread_info *thread)
1647 {
1648 delete_lwp (get_thread_lwp (thread));
1649 });
1650
1651 /* Free all private data. */
1652 priv = process->priv;
1653 low_delete_process (priv->arch_private);
1654 free (priv);
1655 process->priv = NULL;
1656
1657 remove_process (process);
1658 }
1659
1660 void
1661 linux_process_target::join (int pid)
1662 {
1663 int status, ret;
1664
1665 do {
1666 ret = my_waitpid (pid, &status, 0);
1667 if (WIFEXITED (status) || WIFSIGNALED (status))
1668 break;
1669 } while (ret != -1 || errno != ECHILD);
1670 }
1671
1672 /* Return true if the given thread is still alive. */
1673
1674 bool
1675 linux_process_target::thread_alive (ptid_t ptid)
1676 {
1677 struct lwp_info *lwp = find_lwp_pid (ptid);
1678
1679 /* We assume we always know if a thread exits. If a whole process
1680 exited but we still haven't been able to report it to GDB, we'll
1681 hold on to the last lwp of the dead process. */
1682 if (lwp != NULL)
1683 return !lwp_is_marked_dead (lwp);
1684 else
1685 return 0;
1686 }
1687
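/* Return true if THREAD's pending wait status is still worth
   reporting.  A pending breakpoint stop is discarded if the PC has
   since moved away from the breakpoint (or, without siginfo support,
   if the breakpoint itself is gone).  */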
1688 bool
1689 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1690 {
1691 struct lwp_info *lp = get_thread_lwp (thread);
1692
1693 if (!lp->status_pending_p)
1694 return 0;
1695
1696 if (thread->last_resume_kind != resume_stop
1697 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1698 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1699 {
1700 struct thread_info *saved_thread;
1701 CORE_ADDR pc;
1702 int discard = 0;
1703
1704 gdb_assert (lp->last_status != 0);
1705
1706 pc = get_pc (lp);
1707
1708 saved_thread = current_thread;
1709 current_thread = thread;
1710
1711 if (pc != lp->stop_pc)
1712 {
1713 if (debug_threads)
1714 debug_printf ("PC of %ld changed\n",
1715 lwpid_of (thread));
1716 discard = 1;
1717 }
1718
1719 #if !USE_SIGTRAP_SIGINFO
1720 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1721 && !low_breakpoint_at (pc))
1722 {
1723 if (debug_threads)
1724 debug_printf ("previous SW breakpoint of %ld gone\n",
1725 lwpid_of (thread));
1726 discard = 1;
1727 }
1728 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1729 && !hardware_breakpoint_inserted_here (pc))
1730 {
1731 if (debug_threads)
1732 debug_printf ("previous HW breakpoint of %ld gone\n",
1733 lwpid_of (thread));
1734 discard = 1;
1735 }
1736 #endif
1737
1738 current_thread = saved_thread;
1739
1740 if (discard)
1741 {
1742 if (debug_threads)
1743 debug_printf ("discarding pending breakpoint status\n");
1744 lp->status_pending_p = 0;
1745 return 0;
1746 }
1747 }
1748
1749 return 1;
1750 }
1751
1752 /* Returns true if LWP is resumed from the client's perspective. */
1753
1754 static int
1755 lwp_resumed (struct lwp_info *lwp)
1756 {
1757 struct thread_info *thread = get_lwp_thread (lwp);
1758
1759 if (thread->last_resume_kind != resume_stop)
1760 return 1;
1761
1762 /* Did gdb send us a `vCont;t', but we haven't reported the
1763 corresponding stop to gdb yet? If so, the thread is still
1764 resumed/running from gdb's perspective. */
1765 if (thread->last_resume_kind == resume_stop
1766 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1767 return 1;
1768
1769 return 0;
1770 }
1771
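/* Callback used to look for threads with an interesting event to
   report.  Returns true if THREAD matches PTID, is resumed from the
   client's perspective, and still has a reportable pending status;
   a stale pending breakpoint status gets the LWP quietly re-resumed
   instead.  */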
1772 bool
1773 linux_process_target::status_pending_p_callback (thread_info *thread,
1774 ptid_t ptid)
1775 {
1776 struct lwp_info *lp = get_thread_lwp (thread);
1777
1778 /* Check if we're only interested in events from a specific process
1779 or a specific LWP. */
1780 if (!thread->id.matches (ptid))
1781 return 0;
1782
1783 if (!lwp_resumed (lp))
1784 return 0;
1785
1786 if (lp->status_pending_p
1787 && !thread_still_has_status_pending (thread))
1788 {
1789 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1790 return 0;
1791 }
1792
1793 return lp->status_pending_p;
1794 }
1795
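/* Return the lwp_info whose lwpid equals PTID's lwp field (or PTID's
   pid, if the lwp field is zero), or NULL if no such LWP is known.  */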
1796 struct lwp_info *
1797 find_lwp_pid (ptid_t ptid)
1798 {
1799 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1800 {
1801 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1802 return thr_arg->id.lwp () == lwp;
1803 });
1804
1805 if (thread == NULL)
1806 return NULL;
1807
1808 return get_thread_lwp (thread);
1809 }
1810
1811 /* Return the number of known LWPs in the tgid given by PID. */
1812
1813 static int
1814 num_lwps (int pid)
1815 {
1816 int count = 0;
1817
1818 for_each_thread (pid, [&] (thread_info *thread)
1819 {
1820 count++;
1821 });
1822
1823 return count;
1824 }
1825
1826 /* See nat/linux-nat.h. */
1827
1828 struct lwp_info *
1829 iterate_over_lwps (ptid_t filter,
1830 gdb::function_view<iterate_over_lwps_ftype> callback)
1831 {
1832 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1833 {
1834 lwp_info *lwp = get_thread_lwp (thr_arg);
1835
1836 return callback (lwp);
1837 });
1838
1839 if (thread == NULL)
1840 return NULL;
1841
1842 return get_thread_lwp (thread);
1843 }
1844
1845 void
1846 linux_process_target::check_zombie_leaders ()
1847 {
1848 for_each_process ([this] (process_info *proc) {
1849 pid_t leader_pid = pid_of (proc);
1850 struct lwp_info *leader_lp;
1851
1852 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1853
1854 if (debug_threads)
1855 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1856 "num_lwps=%d, zombie=%d\n",
1857 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1858 linux_proc_pid_is_zombie (leader_pid));
1859
1860 if (leader_lp != NULL && !leader_lp->stopped
1861 /* Check if there are other threads in the group, as we may
1862 have raced with the inferior simply exiting. */
1863 && !last_thread_of_process_p (leader_pid)
1864 && linux_proc_pid_is_zombie (leader_pid))
1865 {
1866 /* A leader zombie can mean one of two things:
1867
1868 - It exited, and there's an exit status pending
1869 available, or only the leader exited (not the whole
1870 program). In the latter case, we can't waitpid the
1871 leader's exit status until all other threads are gone.
1872
1873 - There are 3 or more threads in the group, and a thread
1874 other than the leader exec'd. On an exec, the Linux
1875 kernel destroys all other threads (except the execing
1876 one) in the thread group, and resets the execing thread's
1877 tid to the tgid. No exit notification is sent for the
1878 execing thread -- from the ptracer's perspective, it
1879 appears as though the execing thread just vanishes.
1880 Until we reap all other threads except the leader and the
1881 execing thread, the leader will be zombie, and the
1882 execing thread will be in `D (disc sleep)'. As soon as
1883 all other threads are reaped, the execing thread changes
1884 it's tid to the tgid, and the previous (zombie) leader
1885 vanishes, giving place to the "new" leader. We could try
1886 distinguishing the exit and exec cases, by waiting once
1887 more, and seeing if something comes out, but it doesn't
1888 sound useful. The previous leader _does_ go away, and
1889 we'll re-add the new one once we see the exec event
1890 (which is just the same as what would happen if the
1891 previous leader did exit voluntarily before some other
1892 thread execs). */
1893
1894 if (debug_threads)
1895 debug_printf ("CZL: Thread group leader %d zombie "
1896 "(it exited, or another thread execd).\n",
1897 leader_pid);
1898
1899 delete_lwp (leader_lp);
1900 }
1901 });
1902 }
1903
1904 /* Callback for `find_thread'. Returns the first LWP that is not
1905 stopped. */
1906
1907 static bool
1908 not_stopped_callback (thread_info *thread, ptid_t filter)
1909 {
1910 if (!thread->id.matches (filter))
1911 return false;
1912
1913 lwp_info *lwp = get_thread_lwp (thread);
1914
1915 return !lwp->stopped;
1916 }
1917
1918 /* Increment LWP's suspend count. */
1919
1920 static void
1921 lwp_suspended_inc (struct lwp_info *lwp)
1922 {
1923 lwp->suspended++;
1924
1925 if (debug_threads && lwp->suspended > 4)
1926 {
1927 struct thread_info *thread = get_lwp_thread (lwp);
1928
1929 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1930 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1931 }
1932 }
1933
1934 /* Decrement LWP's suspend count. */
1935
1936 static void
1937 lwp_suspended_decr (struct lwp_info *lwp)
1938 {
1939 lwp->suspended--;
1940
1941 if (lwp->suspended < 0)
1942 {
1943 struct thread_info *thread = get_lwp_thread (lwp);
1944
1945 internal_error (__FILE__, __LINE__,
1946 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1947 lwp->suspended);
1948 }
1949 }
1950
1951 /* This function should only be called if the LWP got a SIGTRAP.
1952
1953 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1954 event was handled, 0 otherwise. */
1955
1956 static int
1957 handle_tracepoints (struct lwp_info *lwp)
1958 {
1959 struct thread_info *tinfo = get_lwp_thread (lwp);
1960 int tpoint_related_event = 0;
1961
1962 gdb_assert (lwp->suspended == 0);
1963
1964 /* If this tracepoint hit causes a tracing stop, we'll immediately
1965 uninsert tracepoints. To do this, we temporarily pause all
1966 threads, unpatch away, and then unpause threads. We need to make
1967 sure the unpausing doesn't resume this LWP too. */
1968 lwp_suspended_inc (lwp);
1969
1970 /* And we need to be sure that any all-threads-stopping doesn't try
1971 to move threads out of the jump pads, as it could deadlock the
1972 inferior (LWP could be in the jump pad, maybe even holding the
1973 lock). */
1974
1975 /* Do any necessary step collect actions. */
1976 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1977
1978 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1979
1980 /* See if we just hit a tracepoint and do its main collect
1981 actions. */
1982 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1983
1984 lwp_suspended_decr (lwp);
1985
1986 gdb_assert (lwp->suspended == 0);
1987 gdb_assert (!stabilizing_threads
1988 || (lwp->collecting_fast_tracepoint
1989 != fast_tpoint_collect_result::not_collecting));
1990
1991 if (tpoint_related_event)
1992 {
1993 if (debug_threads)
1994 debug_printf ("got a tracepoint event\n");
1995 return 1;
1996 }
1997
1998 return 0;
1999 }
2000
2001 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2002 collection status. */
2003
2004 static fast_tpoint_collect_result
2005 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2006 struct fast_tpoint_collect_status *status)
2007 {
2008 CORE_ADDR thread_area;
2009 struct thread_info *thread = get_lwp_thread (lwp);
2010
2011 if (the_low_target.get_thread_area == NULL)
2012 return fast_tpoint_collect_result::not_collecting;
2013
2014 /* Get the thread area address. This is used to recognize which
2015 thread is which when tracing with the in-process agent library.
2016 We don't read anything from the address, and treat it as opaque;
2017 it's the address itself that we assume is unique per-thread. */
2018 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2019 return fast_tpoint_collect_result::not_collecting;
2020
2021 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2022 }
2023
2024 bool
2025 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2026 {
2027 struct thread_info *saved_thread;
2028
2029 saved_thread = current_thread;
2030 current_thread = get_lwp_thread (lwp);
2031
2032 if ((wstat == NULL
2033 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2034 && supports_fast_tracepoints ()
2035 && agent_loaded_p ())
2036 {
2037 struct fast_tpoint_collect_status status;
2038
2039 if (debug_threads)
2040 debug_printf ("Checking whether LWP %ld needs to move out of the "
2041 "jump pad.\n",
2042 lwpid_of (current_thread));
2043
2044 fast_tpoint_collect_result r
2045 = linux_fast_tracepoint_collecting (lwp, &status);
2046
2047 if (wstat == NULL
2048 || (WSTOPSIG (*wstat) != SIGILL
2049 && WSTOPSIG (*wstat) != SIGFPE
2050 && WSTOPSIG (*wstat) != SIGSEGV
2051 && WSTOPSIG (*wstat) != SIGBUS))
2052 {
2053 lwp->collecting_fast_tracepoint = r;
2054
2055 if (r != fast_tpoint_collect_result::not_collecting)
2056 {
2057 if (r == fast_tpoint_collect_result::before_insn
2058 && lwp->exit_jump_pad_bkpt == NULL)
2059 {
2060 /* Haven't executed the original instruction yet.
2061 Set breakpoint there, and wait till it's hit,
2062 then single-step until exiting the jump pad. */
2063 lwp->exit_jump_pad_bkpt
2064 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2065 }
2066
2067 if (debug_threads)
2068 debug_printf ("Checking whether LWP %ld needs to move out of "
2069 "the jump pad...it does\n",
2070 lwpid_of (current_thread));
2071 current_thread = saved_thread;
2072
2073 return true;
2074 }
2075 }
2076 else
2077 {
2078 /* If we get a synchronous signal while collecting, *and*
2079 while executing the (relocated) original instruction,
2080 reset the PC to point at the tpoint address, before
2081 reporting to GDB. Otherwise, it's an IPA lib bug: just
2082 report the signal to GDB, and pray for the best. */
2083
2084 lwp->collecting_fast_tracepoint
2085 = fast_tpoint_collect_result::not_collecting;
2086
2087 if (r != fast_tpoint_collect_result::not_collecting
2088 && (status.adjusted_insn_addr <= lwp->stop_pc
2089 && lwp->stop_pc < status.adjusted_insn_addr_end))
2090 {
2091 siginfo_t info;
2092 struct regcache *regcache;
2093
2094 /* The si_addr on a few signals references the address
2095 of the faulting instruction. Adjust that as
2096 well. */
2097 if ((WSTOPSIG (*wstat) == SIGILL
2098 || WSTOPSIG (*wstat) == SIGFPE
2099 || WSTOPSIG (*wstat) == SIGBUS
2100 || WSTOPSIG (*wstat) == SIGSEGV)
2101 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2102 (PTRACE_TYPE_ARG3) 0, &info) == 0
2103 /* Final check just to make sure we don't clobber
2104 the siginfo of non-kernel-sent signals. */
2105 && (uintptr_t) info.si_addr == lwp->stop_pc)
2106 {
2107 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2108 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2109 (PTRACE_TYPE_ARG3) 0, &info);
2110 }
2111
2112 regcache = get_thread_regcache (current_thread, 1);
2113 low_set_pc (regcache, status.tpoint_addr);
2114 lwp->stop_pc = status.tpoint_addr;
2115
2116 /* Cancel any fast tracepoint lock this thread was
2117 holding. */
2118 force_unlock_trace_buffer ();
2119 }
2120
2121 if (lwp->exit_jump_pad_bkpt != NULL)
2122 {
2123 if (debug_threads)
2124 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2125 "stopping all threads momentarily.\n");
2126
2127 stop_all_lwps (1, lwp);
2128
2129 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2130 lwp->exit_jump_pad_bkpt = NULL;
2131
2132 unstop_all_lwps (1, lwp);
2133
2134 gdb_assert (lwp->suspended >= 0);
2135 }
2136 }
2137 }
2138
2139 if (debug_threads)
2140 debug_printf ("Checking whether LWP %ld needs to move out of the "
2141 "jump pad...no\n",
2142 lwpid_of (current_thread));
2143
2144 current_thread = saved_thread;
2145 return false;
2146 }
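
/* A minimal sketch of the si_addr rewrite performed above, assuming
   raw ptrace and a tracee currently in a signal-stop; illustrative
   only, and the helper name is hypothetical.  It fetches the pending
   siginfo, rewrites the fault address, and writes it back, so that
   the si_addr GDB sees matches the logical (un-relocated) PC.  */

static void
example_rewrite_si_addr (pid_t tid, uintptr_t new_addr)
{
  siginfo_t info;

  if (ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, &info) == 0)
    {
      info.si_addr = (void *) new_addr;
      ptrace (PTRACE_SETSIGINFO, tid, (void *) 0, &info);
    }
}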
2147
2148 /* Enqueue one signal in the "signals to report later when out of the
2149 jump pad" list. */
2150
2151 static void
2152 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2153 {
2154 struct pending_signals *p_sig;
2155 struct thread_info *thread = get_lwp_thread (lwp);
2156
2157 if (debug_threads)
2158 debug_printf ("Deferring signal %d for LWP %ld.\n",
2159 WSTOPSIG (*wstat), lwpid_of (thread));
2160
2161 if (debug_threads)
2162 {
2163 struct pending_signals *sig;
2164
2165 for (sig = lwp->pending_signals_to_report;
2166 sig != NULL;
2167 sig = sig->prev)
2168 debug_printf (" Already queued %d\n",
2169 sig->signal);
2170
2171 debug_printf (" (no more currently queued signals)\n");
2172 }
2173
2174 /* Don't enqueue non-RT signals if they are already in the deferred
2175 queue. (SIGSTOP being the easiest signal to see ending up here
2176 twice) */
2177 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2178 {
2179 struct pending_signals *sig;
2180
2181 for (sig = lwp->pending_signals_to_report;
2182 sig != NULL;
2183 sig = sig->prev)
2184 {
2185 if (sig->signal == WSTOPSIG (*wstat))
2186 {
2187 if (debug_threads)
2188 debug_printf ("Not requeuing already queued non-RT signal %d"
2189 " for LWP %ld\n",
2190 sig->signal,
2191 lwpid_of (thread));
2192 return;
2193 }
2194 }
2195 }
2196
2197 p_sig = XCNEW (struct pending_signals);
2198 p_sig->prev = lwp->pending_signals_to_report;
2199 p_sig->signal = WSTOPSIG (*wstat);
2200
2201 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2202 &p_sig->info);
2203
2204 lwp->pending_signals_to_report = p_sig;
2205 }
2206
2207 /* Dequeue one signal from the "signals to report later when out of
2208 the jump pad" list. */
2209
2210 static int
2211 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2212 {
2213 struct thread_info *thread = get_lwp_thread (lwp);
2214
2215 if (lwp->pending_signals_to_report != NULL)
2216 {
2217 struct pending_signals **p_sig;
2218
2219 p_sig = &lwp->pending_signals_to_report;
2220 while ((*p_sig)->prev != NULL)
2221 p_sig = &(*p_sig)->prev;
2222
2223 *wstat = W_STOPCODE ((*p_sig)->signal);
2224 if ((*p_sig)->info.si_signo != 0)
2225 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2226 &(*p_sig)->info);
2227 free (*p_sig);
2228 *p_sig = NULL;
2229
2230 if (debug_threads)
2231 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2232 WSTOPSIG (*wstat), lwpid_of (thread));
2233
2234 if (debug_threads)
2235 {
2236 struct pending_signals *sig;
2237
2238 for (sig = lwp->pending_signals_to_report;
2239 sig != NULL;
2240 sig = sig->prev)
2241 debug_printf (" Still queued %d\n",
2242 sig->signal);
2243
2244 debug_printf (" (no more queued signals)\n");
2245 }
2246
2247 return 1;
2248 }
2249
2250 return 0;
2251 }
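
/* A minimal sketch of the queue discipline implemented by the two
   functions above; illustrative only, with hypothetical names.  New
   signals are pushed at the head ("prev" points at older entries),
   and dequeuing walks to the tail, so deferred signals are reported
   in FIFO order.  Returns 0 if the queue is empty.  */

struct example_sig
{
  struct example_sig *prev;	/* Next-older entry, or NULL.  */
  int signal;
};

static int
example_dequeue_oldest (struct example_sig **head)
{
  if (*head == NULL)
    return 0;

  /* Walk to the oldest entry.  */
  struct example_sig **p = head;
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  int sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}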
2252
2253 bool
2254 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2255 {
2256 struct thread_info *saved_thread = current_thread;
2257 current_thread = get_lwp_thread (child);
2258
2259 if (low_stopped_by_watchpoint ())
2260 {
2261 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2262 child->stopped_data_address = low_stopped_data_address ();
2263 }
2264
2265 current_thread = saved_thread;
2266
2267 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2268 }
2269
2270 bool
2271 linux_process_target::low_stopped_by_watchpoint ()
2272 {
2273 return false;
2274 }
2275
2276 CORE_ADDR
2277 linux_process_target::low_stopped_data_address ()
2278 {
2279 return 0;
2280 }
2281
2282 /* Return the ptrace options that we want to try to enable. */
2283
2284 static int
2285 linux_low_ptrace_options (int attached)
2286 {
2287 client_state &cs = get_client_state ();
2288 int options = 0;
2289
2290 if (!attached)
2291 options |= PTRACE_O_EXITKILL;
2292
2293 if (cs.report_fork_events)
2294 options |= PTRACE_O_TRACEFORK;
2295
2296 if (cs.report_vfork_events)
2297 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2298
2299 if (cs.report_exec_events)
2300 options |= PTRACE_O_TRACEEXEC;
2301
2302 options |= PTRACE_O_TRACESYSGOOD;
2303
2304 return options;
2305 }
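
/* A minimal sketch, assuming raw ptrace, of how the options computed
   above are applied to a stopped tracee; the helper name is
   hypothetical.  gdbserver does this via linux_enable_event_reporting
   (nat/linux-ptrace.c), which additionally masks out options the
   running kernel does not support.  */

static void
example_apply_ptrace_options (pid_t tid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  if (ptrace (PTRACE_SETOPTIONS, tid, (void *) 0,
	      (void *) (uintptr_t) options) == -1)
    perror ("PTRACE_SETOPTIONS");
}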
2306
2307 lwp_info *
2308 linux_process_target::filter_event (int lwpid, int wstat)
2309 {
2310 client_state &cs = get_client_state ();
2311 struct lwp_info *child;
2312 struct thread_info *thread;
2313 int have_stop_pc = 0;
2314
2315 child = find_lwp_pid (ptid_t (lwpid));
2316
2317 /* Check for stop events reported by a process we didn't already
2318 know about - anything not already in our LWP list.
2319
2320 If we're expecting to receive stopped processes after
2321 fork, vfork, and clone events, then we'll just add the
2322 new one to our list and go back to waiting for the event
2323 to be reported - the stopped process might be returned
2324 from waitpid before or after the event is.
2325
2326 But note the case of a non-leader thread exec'ing after the
2327 leader having exited, and gone from our lists (because
2328 check_zombie_leaders deleted it). The non-leader thread
2329 changes its tid to the tgid. */
2330
2331 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2332 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2333 {
2334 ptid_t child_ptid;
2335
2336 /* A multi-thread exec after we had seen the leader exiting. */
2337 if (debug_threads)
2338 {
2339 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2340 "after exec.\n", lwpid);
2341 }
2342
2343 child_ptid = ptid_t (lwpid, lwpid, 0);
2344 child = add_lwp (child_ptid);
2345 child->stopped = 1;
2346 current_thread = child->thread;
2347 }
2348
2349 /* If we didn't find a process, one of two things presumably happened:
2350 - A process we started and then detached from has exited. Ignore it.
2351 - A process we are controlling has forked and the new child's stop
2352 was reported to us by the kernel. Save its PID. */
2353 if (child == NULL && WIFSTOPPED (wstat))
2354 {
2355 add_to_pid_list (&stopped_pids, lwpid, wstat);
2356 return NULL;
2357 }
2358 else if (child == NULL)
2359 return NULL;
2360
2361 thread = get_lwp_thread (child);
2362
2363 child->stopped = 1;
2364
2365 child->last_status = wstat;
2366
2367 /* Check if the thread has exited. */
2368 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2369 {
2370 if (debug_threads)
2371 debug_printf ("LLFE: %d exited.\n", lwpid);
2372
2373 if (finish_step_over (child))
2374 {
2375 /* Unsuspend all other LWPs, and set them back running again. */
2376 unsuspend_all_lwps (child);
2377 }
2378
2379 /* If there is at least one more LWP, then the exit signal was
2380 not the end of the debugged application and should be
2381 ignored, unless GDB wants to hear about thread exits. */
2382 if (cs.report_thread_events
2383 || last_thread_of_process_p (pid_of (thread)))
2384 {
2385 /* Events are serialized to the GDB core, and we can't report
2386 this one right now, so leave the status pending for the next
2387 time we're able to report it. */
2388 mark_lwp_dead (child, wstat);
2389 return child;
2390 }
2391 else
2392 {
2393 delete_lwp (child);
2394 return NULL;
2395 }
2396 }
2397
2398 gdb_assert (WIFSTOPPED (wstat));
2399
2400 if (WIFSTOPPED (wstat))
2401 {
2402 struct process_info *proc;
2403
2404 /* Architecture-specific setup after inferior is running. */
2405 proc = find_process_pid (pid_of (thread));
2406 if (proc->tdesc == NULL)
2407 {
2408 if (proc->attached)
2409 {
2410 /* This needs to happen after we have attached to the
2411 inferior and it is stopped for the first time, but
2412 before we access any inferior registers. */
2413 arch_setup_thread (thread);
2414 }
2415 else
2416 {
2417 /* The process is started, but GDBserver will do
2418 architecture-specific setup after the program stops at
2419 the first instruction. */
2420 child->status_pending_p = 1;
2421 child->status_pending = wstat;
2422 return child;
2423 }
2424 }
2425 }
2426
2427 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2428 {
2429 struct process_info *proc = find_process_pid (pid_of (thread));
2430 int options = linux_low_ptrace_options (proc->attached);
2431
2432 linux_enable_event_reporting (lwpid, options);
2433 child->must_set_ptrace_flags = 0;
2434 }
2435
2436 /* Always update syscall_state, even if it will be filtered later. */
2437 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2438 {
2439 child->syscall_state
2440 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2441 ? TARGET_WAITKIND_SYSCALL_RETURN
2442 : TARGET_WAITKIND_SYSCALL_ENTRY);
2443 }
2444 else
2445 {
2446 /* Almost all other ptrace-stops are known to be outside of system
2447 calls, with further exceptions in handle_extended_wait. */
2448 child->syscall_state = TARGET_WAITKIND_IGNORE;
2449 }
2450
2451 /* Be careful to not overwrite stop_pc until save_stop_reason is
2452 called. */
2453 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2454 && linux_is_extended_waitstatus (wstat))
2455 {
2456 child->stop_pc = get_pc (child);
2457 if (handle_extended_wait (&child, wstat))
2458 {
2459 /* The event has been handled, so just return without
2460 reporting it. */
2461 return NULL;
2462 }
2463 }
2464
2465 if (linux_wstatus_maybe_breakpoint (wstat))
2466 {
2467 if (save_stop_reason (child))
2468 have_stop_pc = 1;
2469 }
2470
2471 if (!have_stop_pc)
2472 child->stop_pc = get_pc (child);
2473
2474 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2475 && child->stop_expected)
2476 {
2477 if (debug_threads)
2478 debug_printf ("Expected stop.\n");
2479 child->stop_expected = 0;
2480
2481 if (thread->last_resume_kind == resume_stop)
2482 {
2483 /* We want to report the stop to the core. Treat the
2484 SIGSTOP as a normal event. */
2485 if (debug_threads)
2486 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2487 target_pid_to_str (ptid_of (thread)));
2488 }
2489 else if (stopping_threads != NOT_STOPPING_THREADS)
2490 {
2491 /* Stopping threads. We don't want this SIGSTOP to end up
2492 pending. */
2493 if (debug_threads)
2494 debug_printf ("LLW: SIGSTOP caught for %s "
2495 "while stopping threads.\n",
2496 target_pid_to_str (ptid_of (thread)));
2497 return NULL;
2498 }
2499 else
2500 {
2501 /* This is a delayed SIGSTOP. Filter out the event. */
2502 if (debug_threads)
2503 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2504 child->stepping ? "step" : "continue",
2505 target_pid_to_str (ptid_of (thread)));
2506
2507 resume_one_lwp (child, child->stepping, 0, NULL);
2508 return NULL;
2509 }
2510 }
2511
2512 child->status_pending_p = 1;
2513 child->status_pending = wstat;
2514 return child;
2515 }
2516
2517 /* Return 1 if THREAD is doing a hardware single step, 0 otherwise. */
2518
2519 static int
2520 maybe_hw_step (struct thread_info *thread)
2521 {
2522 if (can_hardware_single_step ())
2523 return 1;
2524 else
2525 {
2526 /* GDBserver must insert a single-step breakpoint for software
2527 single step. */
2528 gdb_assert (has_single_step_breakpoints (thread));
2529 return 0;
2530 }
2531 }
2532
2533 void
2534 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2535 {
2536 struct lwp_info *lp = get_thread_lwp (thread);
2537
2538 if (lp->stopped
2539 && !lp->suspended
2540 && !lp->status_pending_p
2541 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2542 {
2543 int step = 0;
2544
2545 if (thread->last_resume_kind == resume_step)
2546 step = maybe_hw_step (thread);
2547
2548 if (debug_threads)
2549 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2550 target_pid_to_str (ptid_of (thread)),
2551 paddress (lp->stop_pc),
2552 step);
2553
2554 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2555 }
2556 }
2557
2558 int
2559 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2560 ptid_t filter_ptid,
2561 int *wstatp, int options)
2562 {
2563 struct thread_info *event_thread;
2564 struct lwp_info *event_child, *requested_child;
2565 sigset_t block_mask, prev_mask;
2566
2567 retry:
2568 /* N.B. event_thread points to the thread_info struct that contains
2569 event_child. Keep them in sync. */
2570 event_thread = NULL;
2571 event_child = NULL;
2572 requested_child = NULL;
2573
2574 /* Check for a lwp with a pending status. */
2575
2576 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2577 {
2578 event_thread = find_thread_in_random ([&] (thread_info *thread)
2579 {
2580 return status_pending_p_callback (thread, filter_ptid);
2581 });
2582
2583 if (event_thread != NULL)
2584 event_child = get_thread_lwp (event_thread);
2585 if (debug_threads && event_thread)
2586 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2587 }
2588 else if (filter_ptid != null_ptid)
2589 {
2590 requested_child = find_lwp_pid (filter_ptid);
2591
2592 if (stopping_threads == NOT_STOPPING_THREADS
2593 && requested_child->status_pending_p
2594 && (requested_child->collecting_fast_tracepoint
2595 != fast_tpoint_collect_result::not_collecting))
2596 {
2597 enqueue_one_deferred_signal (requested_child,
2598 &requested_child->status_pending);
2599 requested_child->status_pending_p = 0;
2600 requested_child->status_pending = 0;
2601 resume_one_lwp (requested_child, 0, 0, NULL);
2602 }
2603
2604 if (requested_child->suspended
2605 && requested_child->status_pending_p)
2606 {
2607 internal_error (__FILE__, __LINE__,
2608 "requesting an event out of a"
2609 " suspended child?");
2610 }
2611
2612 if (requested_child->status_pending_p)
2613 {
2614 event_child = requested_child;
2615 event_thread = get_lwp_thread (event_child);
2616 }
2617 }
2618
2619 if (event_child != NULL)
2620 {
2621 if (debug_threads)
2622 debug_printf ("Got an event from pending child %ld (%04x)\n",
2623 lwpid_of (event_thread), event_child->status_pending);
2624 *wstatp = event_child->status_pending;
2625 event_child->status_pending_p = 0;
2626 event_child->status_pending = 0;
2627 current_thread = event_thread;
2628 return lwpid_of (event_thread);
2629 }
2630
2631 /* But if we don't find a pending event, we'll have to wait.
2632
2633 We only enter this loop if no process has a pending wait status.
2634 Thus any action taken in response to a wait status inside this
2635 loop is responding as soon as we detect the status, not after any
2636 pending events. */
2637
2638 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2639 all signals while here. */
2640 sigfillset (&block_mask);
2641 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2642
2643 /* Always pull all events out of the kernel. We'll randomly select
2644 an event LWP out of all that have events, to prevent
2645 starvation. */
2646 while (event_child == NULL)
2647 {
2648 pid_t ret = 0;
2649
2650 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2651 quirks:
2652
2653 - If the thread group leader exits while other threads in the
2654 thread group still exist, waitpid(TGID, ...) hangs. That
2655 waitpid won't return an exit status until the other threads
2656 in the group are reaped.
2657
2658 - When a non-leader thread execs, that thread just vanishes
2659 without reporting an exit (so we'd hang if we waited for it
2660 explicitly in that case). The exec event is reported to
2661 the TGID pid. */
2662 errno = 0;
2663 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2664
2665 if (debug_threads)
2666 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2667 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2668
2669 if (ret > 0)
2670 {
2671 if (debug_threads)
2672 {
2673 debug_printf ("LLW: waitpid %ld received %s\n",
2674 (long) ret, status_to_str (*wstatp));
2675 }
2676
2677 /* Filter all events. IOW, leave all events pending. We'll
2678 randomly select an event LWP out of all that have events
2679 below. */
2680 filter_event (ret, *wstatp);
2681 /* Retry until nothing comes out of waitpid. A single
2682 SIGCHLD can indicate more than one child stopped. */
2683 continue;
2684 }
2685
2686 /* Now that we've pulled all events out of the kernel, resume
2687 LWPs that don't have an interesting event to report. */
2688 if (stopping_threads == NOT_STOPPING_THREADS)
2689 for_each_thread ([this] (thread_info *thread)
2690 {
2691 resume_stopped_resumed_lwps (thread);
2692 });
2693
2694 /* ... and find an LWP with a status to report to the core, if
2695 any. */
2696 event_thread = find_thread_in_random ([&] (thread_info *thread)
2697 {
2698 return status_pending_p_callback (thread, filter_ptid);
2699 });
2700
2701 if (event_thread != NULL)
2702 {
2703 event_child = get_thread_lwp (event_thread);
2704 *wstatp = event_child->status_pending;
2705 event_child->status_pending_p = 0;
2706 event_child->status_pending = 0;
2707 break;
2708 }
2709
2710 /* Check for zombie thread group leaders. Those can't be reaped
2711 until all other threads in the thread group are. */
2712 check_zombie_leaders ();
2713
2714 auto not_stopped = [&] (thread_info *thread)
2715 {
2716 return not_stopped_callback (thread, wait_ptid);
2717 };
2718
2719 /* If there are no resumed children left in the set of LWPs we
2720 want to wait for, bail. We can't just block in
2721 waitpid/sigsuspend, because lwps might have been left stopped
2722 in trace-stop state, and we'd be stuck forever waiting for
2723 their status to change (which would only happen if we resumed
2724 them). Even if WNOHANG is set, this return code is preferred
2725 over 0 (below), as it is more detailed. */
2726 if (find_thread (not_stopped) == NULL)
2727 {
2728 if (debug_threads)
2729 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2730 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2731 return -1;
2732 }
2733
2734 /* No interesting event to report to the caller. */
2735 if ((options & WNOHANG))
2736 {
2737 if (debug_threads)
2738 debug_printf ("WNOHANG set, no event found\n");
2739
2740 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2741 return 0;
2742 }
2743
2744 /* Block until we get an event reported with SIGCHLD. */
2745 if (debug_threads)
2746 debug_printf ("sigsuspend'ing\n");
2747
2748 sigsuspend (&prev_mask);
2749 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2750 goto retry;
2751 }
2752
2753 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2754
2755 current_thread = event_thread;
2756
2757 return lwpid_of (event_thread);
2758 }
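
/* A minimal sketch of the draining discipline used by
   wait_for_event_filtered above, assuming only libc (and __WALL,
   which needs _GNU_SOURCE); illustrative only, with error handling
   elided.  Block SIGCHLD, pull every pending status out of the
   kernel with waitpid(-1, ..., WNOHANG), and only then sigsuspend
   until the next SIGCHLD -- a single SIGCHLD can stand for several
   stopped children.  */

static void
example_drain_and_wait (void)
{
  sigset_t block_mask, prev_mask;
  int wstat;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)	/* Loops forever; a real caller would have an exit path.  */
    {
      /* Drain every pending event first.  */
      while (waitpid (-1, &wstat, __WALL | WNOHANG) > 0)
	;	/* Record/filter the event here.  */

      /* Nothing pending: atomically unblock and wait for SIGCHLD.  */
      sigsuspend (&prev_mask);
    }
}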
2759
2760 int
2761 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2762 {
2763 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2764 }
2765
2766 /* Select one LWP out of those that have events pending. */
2767
2768 static void
2769 select_event_lwp (struct lwp_info **orig_lp)
2770 {
2771 struct thread_info *event_thread = NULL;
2772
2773 /* In all-stop, give preference to the LWP that is being
2774 single-stepped. There will be at most one, and it's the LWP that
2775 the core is most interested in. If we didn't do this, then we'd
2776 have to handle pending step SIGTRAPs somehow in case the core
2777 later continues the previously-stepped thread, otherwise we'd
2778 report the pending SIGTRAP, and the core, not having stepped the
2779 thread, wouldn't understand what the trap was for, and therefore
2780 would report it to the user as a random signal. */
2781 if (!non_stop)
2782 {
2783 event_thread = find_thread ([] (thread_info *thread)
2784 {
2785 lwp_info *lp = get_thread_lwp (thread);
2786
2787 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2788 && thread->last_resume_kind == resume_step
2789 && lp->status_pending_p);
2790 });
2791
2792 if (event_thread != NULL)
2793 {
2794 if (debug_threads)
2795 debug_printf ("SEL: Select single-step %s\n",
2796 target_pid_to_str (ptid_of (event_thread)));
2797 }
2798 }
2799 if (event_thread == NULL)
2800 {
2801 /* No single-stepping LWP. Select one at random, out of those
2802 which have had events. */
2803
2804 event_thread = find_thread_in_random ([&] (thread_info *thread)
2805 {
2806 lwp_info *lp = get_thread_lwp (thread);
2807
2808 /* Only resumed LWPs that have an event pending. */
2809 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2810 && lp->status_pending_p);
2811 });
2812 }
2813
2814 if (event_thread != NULL)
2815 {
2816 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2817
2818 /* Switch the event LWP. */
2819 *orig_lp = event_lp;
2820 }
2821 }
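
/* A minimal sketch of the starvation-avoiding random pick that
   find_thread_in_random performs for us above: a one-pass uniform
   choice among matching threads (size-1 reservoir sampling).
   Illustrative only; the helper name is hypothetical.  */

static thread_info *
example_pick_random (gdb::function_view<bool (thread_info *)> pred)
{
  thread_info *picked = NULL;
  int count = 0;

  for_each_thread ([&] (thread_info *thread)
    {
      if (pred (thread))
	{
	  /* Keep the Nth match with probability 1/N, which leaves
	     every match equally likely to be the final pick.  */
	  count++;
	  if (rand () % count == 0)
	    picked = thread;
	}
    });

  return picked;
}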
2822
2823 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2824 non-NULL. */
2825
2826 static void
2827 unsuspend_all_lwps (struct lwp_info *except)
2828 {
2829 for_each_thread ([&] (thread_info *thread)
2830 {
2831 lwp_info *lwp = get_thread_lwp (thread);
2832
2833 if (lwp != except)
2834 lwp_suspended_decr (lwp);
2835 });
2836 }
2837
2838 static bool stuck_in_jump_pad_callback (thread_info *thread);
2839 static bool lwp_running (thread_info *thread);
2840
2841 /* Stabilize threads (move out of jump pads).
2842
2843 If a thread is midway through collecting a fast tracepoint, we need to
2844 finish the collection and move it out of the jump pad before
2845 reporting the signal.
2846
2847 This avoids recursion while collecting (when a signal arrives
2848 midway, and the signal handler itself collects), which would trash
2849 the trace buffer. In case the user set a breakpoint in a signal
2850 handler, this avoids the backtrace showing the jump pad, etc.
2851 Most importantly, there are certain things we can't do safely if
2852 threads are stopped in a jump pad (or in their callees). For
2853 example:
2854
2855 - starting a new trace run. A thread still collecting from the
2856 previous run could trash the trace buffer when resumed. The trace
2857 buffer control structures would have been reset but the thread had
2858 no way to tell. The thread could even be midway through memcpy'ing
2859 to the buffer, which would mean that when resumed, it would clobber the
2860 trace buffer that had been set for a new run.
2861
2862 - we can't rewrite/reuse the jump pads for new tracepoints
2863 safely. Say you do tstart while a thread is stopped midway through
2864 collecting. When the thread is later resumed, it finishes the
2865 collection, and returns to the jump pad, to execute the original
2866 instruction that was under the tracepoint jump at the time the
2867 older run had been started. If the jump pad had since been
2868 rewritten for something else in the new run, the thread would now
2869 execute the wrong / random instructions. */
2870
2871 void
2872 linux_process_target::stabilize_threads ()
2873 {
2874 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2875
2876 if (thread_stuck != NULL)
2877 {
2878 if (debug_threads)
2879 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2880 lwpid_of (thread_stuck));
2881 return;
2882 }
2883
2884 thread_info *saved_thread = current_thread;
2885
2886 stabilizing_threads = 1;
2887
2888 /* Kick 'em all. */
2889 for_each_thread ([this] (thread_info *thread)
2890 {
2891 move_out_of_jump_pad (thread);
2892 });
2893
2894 /* Loop until all are stopped out of the jump pads. */
2895 while (find_thread (lwp_running) != NULL)
2896 {
2897 struct target_waitstatus ourstatus;
2898 struct lwp_info *lwp;
2899 int wstat;
2900
2901 /* Note that we go through the full wait event loop. While
2902 moving threads out of the jump pad, we need to be able to step
2903 over internal breakpoints and such. */
2904 wait_1 (minus_one_ptid, &ourstatus, 0);
2905
2906 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2907 {
2908 lwp = get_thread_lwp (current_thread);
2909
2910 /* Lock it. */
2911 lwp_suspended_inc (lwp);
2912
2913 if (ourstatus.value.sig != GDB_SIGNAL_0
2914 || current_thread->last_resume_kind == resume_stop)
2915 {
2916 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2917 enqueue_one_deferred_signal (lwp, &wstat);
2918 }
2919 }
2920 }
2921
2922 unsuspend_all_lwps (NULL);
2923
2924 stabilizing_threads = 0;
2925
2926 current_thread = saved_thread;
2927
2928 if (debug_threads)
2929 {
2930 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2931
2932 if (thread_stuck != NULL)
2933 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2934 lwpid_of (thread_stuck));
2935 }
2936 }
2937
2938 /* Convenience function that is called when the kernel reports an
2939 event that is not passed out to GDB. */
2940
2941 static ptid_t
2942 ignore_event (struct target_waitstatus *ourstatus)
2943 {
2944 /* If we got an event, there may still be others, as a single
2945 SIGCHLD can indicate more than one child stopped. This forces
2946 another target_wait call. */
2947 async_file_mark ();
2948
2949 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2950 return null_ptid;
2951 }
2952
2953 ptid_t
2954 linux_process_target::filter_exit_event (lwp_info *event_child,
2955 target_waitstatus *ourstatus)
2956 {
2957 client_state &cs = get_client_state ();
2958 struct thread_info *thread = get_lwp_thread (event_child);
2959 ptid_t ptid = ptid_of (thread);
2960
2961 if (!last_thread_of_process_p (pid_of (thread)))
2962 {
2963 if (cs.report_thread_events)
2964 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2965 else
2966 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2967
2968 delete_lwp (event_child);
2969 }
2970 return ptid;
2971 }
2972
2973 /* Returns 1 if GDB is interested in any event_child syscalls. */
2974
2975 static int
2976 gdb_catching_syscalls_p (struct lwp_info *event_child)
2977 {
2978 struct thread_info *thread = get_lwp_thread (event_child);
2979 struct process_info *proc = get_thread_process (thread);
2980
2981 return !proc->syscalls_to_catch.empty ();
2982 }
2983
2984 /* Returns 1 if GDB is interested in the event_child syscall.
2985 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
2986
2987 static int
2988 gdb_catch_this_syscall_p (struct lwp_info *event_child)
2989 {
2990 int sysno;
2991 struct thread_info *thread = get_lwp_thread (event_child);
2992 struct process_info *proc = get_thread_process (thread);
2993
2994 if (proc->syscalls_to_catch.empty ())
2995 return 0;
2996
2997 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2998 return 1;
2999
3000 get_syscall_trapinfo (event_child, &sysno);
3001
3002 for (int iter : proc->syscalls_to_catch)
3003 if (iter == sysno)
3004 return 1;
3005
3006 return 0;
3007 }
3008
3009 ptid_t
3010 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3011 int target_options)
3012 {
3013 client_state &cs = get_client_state ();
3014 int w;
3015 struct lwp_info *event_child;
3016 int options;
3017 int pid;
3018 int step_over_finished;
3019 int bp_explains_trap;
3020 int maybe_internal_trap;
3021 int report_to_gdb;
3022 int trace_event;
3023 int in_step_range;
3024 int any_resumed;
3025
3026 if (debug_threads)
3027 {
3028 debug_enter ();
3029 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3030 }
3031
3032 /* Translate generic target options into linux options. */
3033 options = __WALL;
3034 if (target_options & TARGET_WNOHANG)
3035 options |= WNOHANG;
3036
3037 bp_explains_trap = 0;
3038 trace_event = 0;
3039 in_step_range = 0;
3040 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3041
3042 auto status_pending_p_any = [&] (thread_info *thread)
3043 {
3044 return status_pending_p_callback (thread, minus_one_ptid);
3045 };
3046
3047 auto not_stopped = [&] (thread_info *thread)
3048 {
3049 return not_stopped_callback (thread, minus_one_ptid);
3050 };
3051
3052 /* Find a resumed LWP, if any. */
3053 if (find_thread (status_pending_p_any) != NULL)
3054 any_resumed = 1;
3055 else if (find_thread (not_stopped) != NULL)
3056 any_resumed = 1;
3057 else
3058 any_resumed = 0;
3059
3060 if (step_over_bkpt == null_ptid)
3061 pid = wait_for_event (ptid, &w, options);
3062 else
3063 {
3064 if (debug_threads)
3065 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3066 target_pid_to_str (step_over_bkpt));
3067 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3068 }
3069
3070 if (pid == 0 || (pid == -1 && !any_resumed))
3071 {
3072 gdb_assert (target_options & TARGET_WNOHANG);
3073
3074 if (debug_threads)
3075 {
3076 debug_printf ("wait_1 ret = null_ptid, "
3077 "TARGET_WAITKIND_IGNORE\n");
3078 debug_exit ();
3079 }
3080
3081 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3082 return null_ptid;
3083 }
3084 else if (pid == -1)
3085 {
3086 if (debug_threads)
3087 {
3088 debug_printf ("wait_1 ret = null_ptid, "
3089 "TARGET_WAITKIND_NO_RESUMED\n");
3090 debug_exit ();
3091 }
3092
3093 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3094 return null_ptid;
3095 }
3096
3097 event_child = get_thread_lwp (current_thread);
3098
3099 /* wait_for_event only returns an exit status for the last
3100 child of a process. Report it. */
3101 if (WIFEXITED (w) || WIFSIGNALED (w))
3102 {
3103 if (WIFEXITED (w))
3104 {
3105 ourstatus->kind = TARGET_WAITKIND_EXITED;
3106 ourstatus->value.integer = WEXITSTATUS (w);
3107
3108 if (debug_threads)
3109 {
3110 debug_printf ("wait_1 ret = %s, exited with "
3111 "retcode %d\n",
3112 target_pid_to_str (ptid_of (current_thread)),
3113 WEXITSTATUS (w));
3114 debug_exit ();
3115 }
3116 }
3117 else
3118 {
3119 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3120 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3121
3122 if (debug_threads)
3123 {
3124 debug_printf ("wait_1 ret = %s, terminated with "
3125 "signal %d\n",
3126 target_pid_to_str (ptid_of (current_thread)),
3127 WTERMSIG (w));
3128 debug_exit ();
3129 }
3130 }
3131
3132 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3133 return filter_exit_event (event_child, ourstatus);
3134
3135 return ptid_of (current_thread);
3136 }
3137
3138 /* If a step-over executes a breakpoint instruction, then in the
3139 case of a hardware single step it means a gdb/gdbserver
3140 breakpoint had been planted on top of a permanent breakpoint,
3141 while in the case of a software single step it may just mean
3142 that gdbserver hit the reinsert breakpoint. The PC has been
3143 adjusted by save_stop_reason to point at the breakpoint
3144 address. So for a hardware single step, advance the PC
3145 manually past the breakpoint; for a software single step,
3146 advance it only if we are not hitting the
3147 single_step_breakpoint. This avoids the program trapping a
3148 permanent breakpoint forever. */
3149 if (step_over_bkpt != null_ptid
3150 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3151 && (event_child->stepping
3152 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3153 {
3154 int increment_pc = 0;
3155 int breakpoint_kind = 0;
3156 CORE_ADDR stop_pc = event_child->stop_pc;
3157
3158 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3159 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3160
3161 if (debug_threads)
3162 {
3163 debug_printf ("step-over for %s executed software breakpoint\n",
3164 target_pid_to_str (ptid_of (current_thread)));
3165 }
3166
3167 if (increment_pc != 0)
3168 {
3169 struct regcache *regcache
3170 = get_thread_regcache (current_thread, 1);
3171
3172 event_child->stop_pc += increment_pc;
3173 low_set_pc (regcache, event_child->stop_pc);
3174
3175 if (!low_breakpoint_at (event_child->stop_pc))
3176 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3177 }
3178 }
3179
3180 /* If this event was not handled before, and is not a SIGTRAP, we
3181 report it. SIGILL and SIGSEGV are also treated as traps in case
3182 a breakpoint is inserted at the current PC. If this target does
3183 not support internal breakpoints at all, we also report the
3184 SIGTRAP without further processing; it's of no concern to us. */
3185 maybe_internal_trap
3186 = (low_supports_breakpoints ()
3187 && (WSTOPSIG (w) == SIGTRAP
3188 || ((WSTOPSIG (w) == SIGILL
3189 || WSTOPSIG (w) == SIGSEGV)
3190 && low_breakpoint_at (event_child->stop_pc))));
3191
3192 if (maybe_internal_trap)
3193 {
3194 /* Handle anything that requires bookkeeping before deciding to
3195 report the event or continue waiting. */
3196
3197 /* First check if we can explain the SIGTRAP with an internal
3198 breakpoint, or if we should possibly report the event to GDB.
3199 Do this before anything that may remove or insert a
3200 breakpoint. */
3201 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3202
3203 /* We have a SIGTRAP, possibly a step-over dance has just
3204 finished. If so, tweak the state machine accordingly,
3205 reinsert breakpoints and delete any single-step
3206 breakpoints. */
3207 step_over_finished = finish_step_over (event_child);
3208
3209 /* Now invoke the callbacks of any internal breakpoints there. */
3210 check_breakpoints (event_child->stop_pc);
3211
3212 /* Handle tracepoint data collecting. This may overflow the
3213 trace buffer, and cause a tracing stop, removing
3214 breakpoints. */
3215 trace_event = handle_tracepoints (event_child);
3216
3217 if (bp_explains_trap)
3218 {
3219 if (debug_threads)
3220 debug_printf ("Hit a gdbserver breakpoint.\n");
3221 }
3222 }
3223 else
3224 {
3225 /* We have some other signal, possibly a step-over dance was in
3226 progress, and it should be cancelled too. */
3227 step_over_finished = finish_step_over (event_child);
3228 }
3229
3230 /* We have all the data we need. Either report the event to GDB, or
3231 resume threads and keep waiting for more. */
3232
3233 /* If we're collecting a fast tracepoint, finish the collection and
3234 move out of the jump pad before delivering a signal. See
3235 linux_process_target::stabilize_threads. */
3236
3237 if (WIFSTOPPED (w)
3238 && WSTOPSIG (w) != SIGTRAP
3239 && supports_fast_tracepoints ()
3240 && agent_loaded_p ())
3241 {
3242 if (debug_threads)
3243 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3244 "to defer or adjust it.\n",
3245 WSTOPSIG (w), lwpid_of (current_thread));
3246
3247 /* Allow debugging the jump pad itself. */
3248 if (current_thread->last_resume_kind != resume_step
3249 && maybe_move_out_of_jump_pad (event_child, &w))
3250 {
3251 enqueue_one_deferred_signal (event_child, &w);
3252
3253 if (debug_threads)
3254 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3255 WSTOPSIG (w), lwpid_of (current_thread));
3256
3257 resume_one_lwp (event_child, 0, 0, NULL);
3258
3259 if (debug_threads)
3260 debug_exit ();
3261 return ignore_event (ourstatus);
3262 }
3263 }
3264
3265 if (event_child->collecting_fast_tracepoint
3266 != fast_tpoint_collect_result::not_collecting)
3267 {
3268 if (debug_threads)
3269 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3270 "Check if we're already there.\n",
3271 lwpid_of (current_thread),
3272 (int) event_child->collecting_fast_tracepoint);
3273
3274 trace_event = 1;
3275
3276 event_child->collecting_fast_tracepoint
3277 = linux_fast_tracepoint_collecting (event_child, NULL);
3278
3279 if (event_child->collecting_fast_tracepoint
3280 != fast_tpoint_collect_result::before_insn)
3281 {
3282 /* No longer need this breakpoint. */
3283 if (event_child->exit_jump_pad_bkpt != NULL)
3284 {
3285 if (debug_threads)
3286 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3287 "stopping all threads momentarily.\n");
3288
3289 /* Other running threads could hit this breakpoint.
3290 We don't handle moribund locations like GDB does;
3291 instead we always pause all threads when removing
3292 breakpoints, so that any step-over or
3293 decr_pc_after_break adjustment is always taken
3294 care of while the breakpoint is still
3295 inserted. */
3296 stop_all_lwps (1, event_child);
3297
3298 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3299 event_child->exit_jump_pad_bkpt = NULL;
3300
3301 unstop_all_lwps (1, event_child);
3302
3303 gdb_assert (event_child->suspended >= 0);
3304 }
3305 }
3306
3307 if (event_child->collecting_fast_tracepoint
3308 == fast_tpoint_collect_result::not_collecting)
3309 {
3310 if (debug_threads)
3311 debug_printf ("fast tracepoint finished "
3312 "collecting successfully.\n");
3313
3314 /* We may have a deferred signal to report. */
3315 if (dequeue_one_deferred_signal (event_child, &w))
3316 {
3317 if (debug_threads)
3318 debug_printf ("dequeued one signal.\n");
3319 }
3320 else
3321 {
3322 if (debug_threads)
3323 debug_printf ("no deferred signals.\n");
3324
3325 if (stabilizing_threads)
3326 {
3327 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3328 ourstatus->value.sig = GDB_SIGNAL_0;
3329
3330 if (debug_threads)
3331 {
3332 debug_printf ("wait_1 ret = %s, stopped "
3333 "while stabilizing threads\n",
3334 target_pid_to_str (ptid_of (current_thread)));
3335 debug_exit ();
3336 }
3337
3338 return ptid_of (current_thread);
3339 }
3340 }
3341 }
3342 }
3343
3344 /* Check whether GDB would be interested in this event. */
3345
3346 /* Check if GDB is interested in this syscall. */
3347 if (WIFSTOPPED (w)
3348 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3349 && !gdb_catch_this_syscall_p (event_child))
3350 {
3351 if (debug_threads)
3352 {
3353 debug_printf ("Ignored syscall for LWP %ld.\n",
3354 lwpid_of (current_thread));
3355 }
3356
3357 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3358
3359 if (debug_threads)
3360 debug_exit ();
3361 return ignore_event (ourstatus);
3362 }
3363
3364 /* If GDB is not interested in this signal, don't stop other
3365 threads, and don't report it to GDB. Just resume the inferior
3366 right away. We do this for threading-related signals as well as
3367 any that GDB specifically requested we ignore. But never ignore
3368 SIGSTOP if we sent it ourselves, and do not ignore signals when
3369 stepping - they may require special handling to skip the signal
3370 handler. Also never ignore signals that could be caused by a
3371 breakpoint. */
3372 if (WIFSTOPPED (w)
3373 && current_thread->last_resume_kind != resume_step
3374 && (
3375 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3376 (current_process ()->priv->thread_db != NULL
3377 && (WSTOPSIG (w) == __SIGRTMIN
3378 || WSTOPSIG (w) == __SIGRTMIN + 1))
3379 ||
3380 #endif
3381 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3382 && !(WSTOPSIG (w) == SIGSTOP
3383 && current_thread->last_resume_kind == resume_stop)
3384 && !linux_wstatus_maybe_breakpoint (w))))
3385 {
3386 siginfo_t info, *info_p;
3387
3388 if (debug_threads)
3389 debug_printf ("Ignored signal %d for LWP %ld.\n",
3390 WSTOPSIG (w), lwpid_of (current_thread));
3391
3392 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3393 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3394 info_p = &info;
3395 else
3396 info_p = NULL;
3397
3398 if (step_over_finished)
3399 {
3400 /* We cancelled this thread's step-over above. We still
3401 need to unsuspend all other LWPs, and set them back
3402 running again while the signal handler runs. */
3403 unsuspend_all_lwps (event_child);
3404
3405 /* Enqueue the pending signal info so that proceed_all_lwps
3406 doesn't lose it. */
3407 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3408
3409 proceed_all_lwps ();
3410 }
3411 else
3412 {
3413 resume_one_lwp (event_child, event_child->stepping,
3414 WSTOPSIG (w), info_p);
3415 }
3416
3417 if (debug_threads)
3418 debug_exit ();
3419
3420 return ignore_event (ourstatus);
3421 }
3422
3423 /* Note that all addresses are always "out of the step range" when
3424 there's no range to begin with. */
3425 in_step_range = lwp_in_step_range (event_child);
3426
3427 /* If GDB wanted this thread to single step, and the thread is out
3428 of the step range, we always want to report the SIGTRAP, and let
3429 GDB handle it. Watchpoints should always be reported. So should
3430 signals we can't explain. A SIGTRAP we can't explain could be a
3431 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3432 do, we'll be able to handle GDB breakpoints on top of internal
3433 breakpoints, by handling the internal breakpoint and still
3434 reporting the event to GDB. If we don't, we're out of luck, GDB
3435 won't see the breakpoint hit. If we see a single-step event but
3436 the thread should be continuing, don't pass the trap to gdb.
3437 That indicates that we had previously finished a single-step but
3438 left the single-step pending -- see
3439 complete_ongoing_step_over. */
3440 report_to_gdb = (!maybe_internal_trap
3441 || (current_thread->last_resume_kind == resume_step
3442 && !in_step_range)
3443 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3444 || (!in_step_range
3445 && !bp_explains_trap
3446 && !trace_event
3447 && !step_over_finished
3448 && !(current_thread->last_resume_kind == resume_continue
3449 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3450 || (gdb_breakpoint_here (event_child->stop_pc)
3451 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3452 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3453 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3454
3455 run_breakpoint_commands (event_child->stop_pc);
3456
3457 /* We found no reason GDB would want us to stop. We either hit one
3458 of our own breakpoints, or finished an internal step GDB
3459 shouldn't know about. */
3460 if (!report_to_gdb)
3461 {
3462 if (debug_threads)
3463 {
3464 if (bp_explains_trap)
3465 debug_printf ("Hit a gdbserver breakpoint.\n");
3466 if (step_over_finished)
3467 debug_printf ("Step-over finished.\n");
3468 if (trace_event)
3469 debug_printf ("Tracepoint event.\n");
3470 if (lwp_in_step_range (event_child))
3471 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3472 paddress (event_child->stop_pc),
3473 paddress (event_child->step_range_start),
3474 paddress (event_child->step_range_end));
3475 }
3476
3477 /* We're not reporting this breakpoint to GDB, so apply the
3478 decr_pc_after_break adjustment to the inferior's regcache
3479 ourselves. */
3480
3481 if (low_supports_breakpoints ())
3482 {
3483 struct regcache *regcache
3484 = get_thread_regcache (current_thread, 1);
3485 low_set_pc (regcache, event_child->stop_pc);
3486 }
3487
3488 if (step_over_finished)
3489 {
3490 /* If we have finished stepping over a breakpoint, we've
3491 stopped and suspended all LWPs momentarily except the
3492 stepping one. This is where we resume them all again.
3493 We're going to keep waiting, so use proceed, which
3494 handles stepping over the next breakpoint. */
3495 unsuspend_all_lwps (event_child);
3496 }
3497 else
3498 {
3499 /* Remove the single-step breakpoints, if any. Note that there
3500 are no single-step breakpoints left if we just finished stepping
3501 over. */
3502 if (supports_software_single_step ()
3503 && has_single_step_breakpoints (current_thread))
3504 {
3505 stop_all_lwps (0, event_child);
3506 delete_single_step_breakpoints (current_thread);
3507 unstop_all_lwps (0, event_child);
3508 }
3509 }
3510
3511 if (debug_threads)
3512 debug_printf ("proceeding all threads.\n");
3513 proceed_all_lwps ();
3514
3515 if (debug_threads)
3516 debug_exit ();
3517
3518 return ignore_event (ourstatus);
3519 }
3520
3521 if (debug_threads)
3522 {
3523 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3524 {
3525 std::string str
3526 = target_waitstatus_to_string (&event_child->waitstatus);
3527
3528 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3529 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3530 }
3531 if (current_thread->last_resume_kind == resume_step)
3532 {
3533 if (event_child->step_range_start == event_child->step_range_end)
3534 debug_printf ("GDB wanted to single-step, reporting event.\n");
3535 else if (!lwp_in_step_range (event_child))
3536 debug_printf ("Out of step range, reporting event.\n");
3537 }
3538 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3539 debug_printf ("Stopped by watchpoint.\n");
3540 else if (gdb_breakpoint_here (event_child->stop_pc))
3541 debug_printf ("Stopped by GDB breakpoint.\n");
3542 debug_printf ("Hit a non-gdbserver trap event.\n");
3544 }
3545
3546 /* Alright, we're going to report a stop. */
3547
3548 /* Remove single-step breakpoints. */
3549 if (supports_software_single_step ())
3550 {
3551 /* Decide whether to remove the single-step breakpoints. If we do,
3552 stop all LWPs first, so that other threads won't hit a breakpoint
3553 left in stale memory. */
3554 int remove_single_step_breakpoints_p = 0;
3555
3556 if (non_stop)
3557 {
3558 remove_single_step_breakpoints_p
3559 = has_single_step_breakpoints (current_thread);
3560 }
3561 else
3562 {
3563 /* In all-stop, a stop reply cancels all previous resume
3564 requests. Delete all single-step breakpoints. */
3565
3566 find_thread ([&] (thread_info *thread) {
3567 if (has_single_step_breakpoints (thread))
3568 {
3569 remove_single_step_breakpoints_p = 1;
3570 return true;
3571 }
3572
3573 return false;
3574 });
3575 }
3576
3577 if (remove_single_step_breakpoints_p)
3578 {
3579 /* If we remove single-step breakpoints from memory, stop all lwps,
3580 so that other threads won't hit the breakpoint in the stale
3581 memory. */
3582 stop_all_lwps (0, event_child);
3583
3584 if (non_stop)
3585 {
3586 gdb_assert (has_single_step_breakpoints (current_thread));
3587 delete_single_step_breakpoints (current_thread);
3588 }
3589 else
3590 {
3591 for_each_thread ([] (thread_info *thread){
3592 if (has_single_step_breakpoints (thread))
3593 delete_single_step_breakpoints (thread);
3594 });
3595 }
3596
3597 unstop_all_lwps (0, event_child);
3598 }
3599 }
3600
3601 if (!stabilizing_threads)
3602 {
3603 /* In all-stop, stop all threads. */
3604 if (!non_stop)
3605 stop_all_lwps (0, NULL);
3606
3607 if (step_over_finished)
3608 {
3609 if (!non_stop)
3610 {
3611 /* If we were doing a step-over, all other threads but
3612 the stepping one had been paused in start_step_over,
3613 with their suspend counts incremented. We don't want
3614 to do a full unstop/unpause, because we're in
3615 all-stop mode (so we want threads stopped), but we
3616 still need to unsuspend the other threads, to
3617 decrement their `suspended' count back. */
3618 unsuspend_all_lwps (event_child);
3619 }
3620 else
3621 {
3622 /* If we just finished a step-over, then all threads had
3623 been momentarily paused. In all-stop, that's fine,
3624 we want threads stopped by now anyway. In non-stop,
3625 we need to re-resume threads that GDB wanted to be
3626 running. */
3627 unstop_all_lwps (1, event_child);
3628 }
3629 }
3630
3631 /* If we're not waiting for a specific LWP, choose an event LWP
3632 from among those that have had events. Giving equal priority
3633 to all LWPs that have had events helps prevent
3634 starvation. */
3635 if (ptid == minus_one_ptid)
3636 {
3637 event_child->status_pending_p = 1;
3638 event_child->status_pending = w;
3639
3640 select_event_lwp (&event_child);
3641
3642 /* current_thread and event_child must stay in sync. */
3643 current_thread = get_lwp_thread (event_child);
3644
3645 event_child->status_pending_p = 0;
3646 w = event_child->status_pending;
3647 }
3648
3649
3650 /* Stabilize threads (move out of jump pads). */
3651 if (!non_stop)
3652 target_stabilize_threads ();
3653 }
3654 else
3655 {
3656 /* If we just finished a step-over, then all threads had been
3657 momentarily paused. In all-stop, that's fine, we want
3658 threads stopped by now anyway. In non-stop, we need to
3659 re-resume threads that GDB wanted to be running. */
3660 if (step_over_finished)
3661 unstop_all_lwps (1, event_child);
3662 }
3663
3664 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3665 {
3666 /* If the reported event is an exit, fork, vfork or exec, let
3667 GDB know. */
3668
3669 /* Break the unreported fork relationship chain. */
3670 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3671 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3672 {
3673 event_child->fork_relative->fork_relative = NULL;
3674 event_child->fork_relative = NULL;
3675 }
3676
3677 *ourstatus = event_child->waitstatus;
3678 /* Clear the event lwp's waitstatus since we handled it already. */
3679 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3680 }
3681 else
3682 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3683
3684 /* Now that we've selected our final event LWP, un-adjust its PC if
3685 it was a software breakpoint, and the client doesn't know we can
3686 adjust the breakpoint ourselves. */
3687 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3688 && !cs.swbreak_feature)
3689 {
3690 int decr_pc = low_decr_pc_after_break ();
3691
3692 if (decr_pc != 0)
3693 {
3694 struct regcache *regcache
3695 = get_thread_regcache (current_thread, 1);
3696 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3697 }
3698 }
3699
3700 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3701 {
3702 get_syscall_trapinfo (event_child,
3703 &ourstatus->value.syscall_number);
3704 ourstatus->kind = event_child->syscall_state;
3705 }
3706 else if (current_thread->last_resume_kind == resume_stop
3707 && WSTOPSIG (w) == SIGSTOP)
3708 {
3709 /* A thread that has been requested to stop by GDB with vCont;t
3710 stopped cleanly, so report it as SIG0. The use of
3711 SIGSTOP is an implementation detail. */
3712 ourstatus->value.sig = GDB_SIGNAL_0;
3713 }
3714 else if (current_thread->last_resume_kind == resume_stop
3715 && WSTOPSIG (w) != SIGSTOP)
3716 {
3717 /* A thread that has been requested to stop by GDB with vCont;t,
3718 but stopped for other reasons. */
3719 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3720 }
3721 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3722 {
3723 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3724 }
3725
3726 gdb_assert (step_over_bkpt == null_ptid);
3727
3728 if (debug_threads)
3729 {
3730 debug_printf ("wait_1 ret = %s, %d, %d\n",
3731 target_pid_to_str (ptid_of (current_thread)),
3732 ourstatus->kind, ourstatus->value.sig);
3733 debug_exit ();
3734 }
3735
3736 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3737 return filter_exit_event (event_child, ourstatus);
3738
3739 return ptid_of (current_thread);
3740 }
3741
3742 /* Get rid of any pending event in the pipe. */
3743 static void
3744 async_file_flush (void)
3745 {
3746 int ret;
3747 char buf;
3748
3749 do
3750 ret = read (linux_event_pipe[0], &buf, 1);
3751 while (ret >= 0 || (ret == -1 && errno == EINTR));
3752 }
3753
3754 /* Put something in the pipe, so the event loop wakes up. */
3755 static void
3756 async_file_mark (void)
3757 {
3758 int ret;
3759
3760 async_file_flush ();
3761
3762 do
3763 ret = write (linux_event_pipe[1], "+", 1);
3764 while (ret == 0 || (ret == -1 && errno == EINTR));
3765
3766 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3767 be awakened anyway. */
3768 }
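
/* Illustrative sketch (not part of the original source): the
   self-pipe pattern the two functions above rely on.  Both ends of
   the event pipe are non-blocking, so async_file_flush can read
   until the pipe is empty and async_file_mark can ignore a full
   pipe.  demo_pipe is a hypothetical stand-in for linux_event_pipe.  */

static int demo_pipe[2];

static int
demo_event_pipe_init (void)
{
  if (pipe (demo_pipe) != 0)
    return -1;

  /* Non-blocking, matching how gdbserver configures its event pipe.  */
  fcntl (demo_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (demo_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}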
3769
3770 ptid_t
3771 linux_process_target::wait (ptid_t ptid,
3772 target_waitstatus *ourstatus,
3773 int target_options)
3774 {
3775 ptid_t event_ptid;
3776
3777 /* Flush the async file first. */
3778 if (target_is_async_p ())
3779 async_file_flush ();
3780
3781 do
3782 {
3783 event_ptid = wait_1 (ptid, ourstatus, target_options);
3784 }
3785 while ((target_options & TARGET_WNOHANG) == 0
3786 && event_ptid == null_ptid
3787 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3788
3789 /* If at least one stop was reported, there may be more. A single
3790 SIGCHLD can signal more than one child stop. */
3791 if (target_is_async_p ()
3792 && (target_options & TARGET_WNOHANG) != 0
3793 && event_ptid != null_ptid)
3794 async_file_mark ();
3795
3796 return event_ptid;
3797 }
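
/* Illustrative sketch (hypothetical caller, not in the original
   source): with TARGET_WNOHANG set, a null_ptid result paired with
   TARGET_WAITKIND_IGNORE means "nothing ready yet"; without it, the
   loop above keeps waiting, so that combination never escapes.  */

static ptid_t
demo_poll_once (linux_process_target *target, target_waitstatus *status)
{
  /* A single non-blocking poll.  */
  return target->wait (minus_one_ptid, status, TARGET_WNOHANG);
}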
3798
3799 /* Send a signal to an LWP. */
3800
3801 static int
3802 kill_lwp (unsigned long lwpid, int signo)
3803 {
3804 int ret;
3805
3806 errno = 0;
3807 ret = syscall (__NR_tkill, lwpid, signo);
3808 if (errno == ENOSYS)
3809 {
3810 /* If tkill fails, then we are not using nptl threads, a
3811 configuration we no longer support. */
3812 perror_with_name (("tkill"));
3813 }
3814 return ret;
3815 }
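
/* Illustrative sketch (not part of the original source): why tkill
   rather than kill.  kill (PID, SIG) targets the whole process, and
   any eligible thread may take the signal; tkill directs it at one
   specific thread, which is what stopping an individual LWP
   requires.  demo_lwpid is a hypothetical value.  */

static void
demo_stop_one_thread (unsigned long demo_lwpid)
{
  /* Same effect as kill_lwp (demo_lwpid, SIGSTOP) above.  */
  syscall (__NR_tkill, demo_lwpid, SIGSTOP);
}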
3816
3817 void
3818 linux_stop_lwp (struct lwp_info *lwp)
3819 {
3820 send_sigstop (lwp);
3821 }
3822
3823 static void
3824 send_sigstop (struct lwp_info *lwp)
3825 {
3826 int pid;
3827
3828 pid = lwpid_of (get_lwp_thread (lwp));
3829
3830 /* If we already have a pending stop signal for this process, don't
3831 send another. */
3832 if (lwp->stop_expected)
3833 {
3834 if (debug_threads)
3835 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3836
3837 return;
3838 }
3839
3840 if (debug_threads)
3841 debug_printf ("Sending sigstop to lwp %d\n", pid);
3842
3843 lwp->stop_expected = 1;
3844 kill_lwp (pid, SIGSTOP);
3845 }
3846
3847 static void
3848 send_sigstop (thread_info *thread, lwp_info *except)
3849 {
3850 struct lwp_info *lwp = get_thread_lwp (thread);
3851
3852 /* Ignore EXCEPT. */
3853 if (lwp == except)
3854 return;
3855
3856 if (lwp->stopped)
3857 return;
3858
3859 send_sigstop (lwp);
3860 }
3861
3862 /* Increment the suspend count of an LWP, and stop it, if not stopped
3863 yet. */
3864 static void
3865 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3866 {
3867 struct lwp_info *lwp = get_thread_lwp (thread);
3868
3869 /* Ignore EXCEPT. */
3870 if (lwp == except)
3871 return;
3872
3873 lwp_suspended_inc (lwp);
3874
3875 send_sigstop (thread, except);
3876 }
3877
3878 static void
3879 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3880 {
3881 /* Store the exit status for later. */
3882 lwp->status_pending_p = 1;
3883 lwp->status_pending = wstat;
3884
3885 /* Store in waitstatus as well, as there's nothing else to process
3886 for this event. */
3887 if (WIFEXITED (wstat))
3888 {
3889 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3890 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3891 }
3892 else if (WIFSIGNALED (wstat))
3893 {
3894 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3895 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3896 }
3897
3898 /* Prevent trying to stop it. */
3899 lwp->stopped = 1;
3900
3901 /* No further stops are expected from a dead lwp. */
3902 lwp->stop_expected = 0;
3903 }
3904
3905 /* Return true if LWP has exited already, and has a pending exit event
3906 to report to GDB. */
3907
3908 static int
3909 lwp_is_marked_dead (struct lwp_info *lwp)
3910 {
3911 return (lwp->status_pending_p
3912 && (WIFEXITED (lwp->status_pending)
3913 || WIFSIGNALED (lwp->status_pending)));
3914 }
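
/* Illustrative sketch (not part of the original source): how a raw
   wait status decomposes under the <sys/wait.h> macros that
   mark_lwp_dead and lwp_is_marked_dead use.  */

static const char *
demo_classify_wstat (int w)
{
  if (WIFEXITED (w))
    return "exited";            /* WEXITSTATUS (w) is the exit code.  */
  else if (WIFSIGNALED (w))
    return "killed by signal";  /* WTERMSIG (w) is the signal.  */
  else if (WIFSTOPPED (w))
    return "stopped";           /* WSTOPSIG (w) is the signal.  */
  else
    return "other";
}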
3915
3916 void
3917 linux_process_target::wait_for_sigstop ()
3918 {
3919 struct thread_info *saved_thread;
3920 ptid_t saved_tid;
3921 int wstat;
3922 int ret;
3923
3924 saved_thread = current_thread;
3925 if (saved_thread != NULL)
3926 saved_tid = saved_thread->id;
3927 else
3928 saved_tid = null_ptid; /* avoid bogus unused warning */
3929
3930 if (debug_threads)
3931 debug_printf ("wait_for_sigstop: pulling events\n");
3932
3933 /* Passing NULL_PTID as filter indicates we want all events to be
3934 left pending. Eventually this returns when there are no
3935 unwaited-for children left. */
3936 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3937 gdb_assert (ret == -1);
3938
3939 if (saved_thread == NULL || mythread_alive (saved_tid))
3940 current_thread = saved_thread;
3941 else
3942 {
3943 if (debug_threads)
3944 debug_printf ("Previously current thread died.\n");
3945
3946 /* We can't change the current inferior behind GDB's back,
3947 otherwise, a subsequent command may apply to the wrong
3948 process. */
3949 current_thread = NULL;
3950 }
3951 }
3952
3953 /* Returns true if THREAD is stopped in a jump pad, and we can't
3954 move it out, because we need to report the stop event to GDB. For
3955 example, if the user puts a breakpoint in the jump pad, it's
3956 because she wants to debug it. */
3957
3958 static bool
3959 stuck_in_jump_pad_callback (thread_info *thread)
3960 {
3961 struct lwp_info *lwp = get_thread_lwp (thread);
3962
3963 if (lwp->suspended != 0)
3964 {
3965 internal_error (__FILE__, __LINE__,
3966 "LWP %ld is suspended, suspended=%d\n",
3967 lwpid_of (thread), lwp->suspended);
3968 }
3969 gdb_assert (lwp->stopped);
3970
3971 /* Allow debugging the jump pad, gdb_collect, etc. */
3972 return (supports_fast_tracepoints ()
3973 && agent_loaded_p ()
3974 && (gdb_breakpoint_here (lwp->stop_pc)
3975 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3976 || thread->last_resume_kind == resume_step)
3977 && (linux_fast_tracepoint_collecting (lwp, NULL)
3978 != fast_tpoint_collect_result::not_collecting));
3979 }
3980
3981 void
3982 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3983 {
3984 struct thread_info *saved_thread;
3985 struct lwp_info *lwp = get_thread_lwp (thread);
3986 int *wstat;
3987
3988 if (lwp->suspended != 0)
3989 {
3990 internal_error (__FILE__, __LINE__,
3991 "LWP %ld is suspended, suspended=%d\n",
3992 lwpid_of (thread), lwp->suspended);
3993 }
3994 gdb_assert (lwp->stopped);
3995
3996 /* For gdb_breakpoint_here. */
3997 saved_thread = current_thread;
3998 current_thread = thread;
3999
4000 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4001
4002 /* Allow debugging the jump pad, gdb_collect, etc. */
4003 if (!gdb_breakpoint_here (lwp->stop_pc)
4004 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4005 && thread->last_resume_kind != resume_step
4006 && maybe_move_out_of_jump_pad (lwp, wstat))
4007 {
4008 if (debug_threads)
4009 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4010 lwpid_of (thread));
4011
4012 if (wstat)
4013 {
4014 lwp->status_pending_p = 0;
4015 enqueue_one_deferred_signal (lwp, wstat);
4016
4017 if (debug_threads)
4018 debug_printf ("Signal %d for LWP %ld deferred "
4019 "(in jump pad)\n",
4020 WSTOPSIG (*wstat), lwpid_of (thread));
4021 }
4022
4023 resume_one_lwp (lwp, 0, 0, NULL);
4024 }
4025 else
4026 lwp_suspended_inc (lwp);
4027
4028 current_thread = saved_thread;
4029 }
4030
4031 static bool
4032 lwp_running (thread_info *thread)
4033 {
4034 struct lwp_info *lwp = get_thread_lwp (thread);
4035
4036 if (lwp_is_marked_dead (lwp))
4037 return false;
4038
4039 return !lwp->stopped;
4040 }
4041
4042 void
4043 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4044 {
4045 /* Should not be called recursively. */
4046 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4047
4048 if (debug_threads)
4049 {
4050 debug_enter ();
4051 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4052 suspend ? "stop-and-suspend" : "stop",
4053 except != NULL
4054 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4055 : "none");
4056 }
4057
4058 stopping_threads = (suspend
4059 ? STOPPING_AND_SUSPENDING_THREADS
4060 : STOPPING_THREADS);
4061
4062 if (suspend)
4063 for_each_thread ([&] (thread_info *thread)
4064 {
4065 suspend_and_send_sigstop (thread, except);
4066 });
4067 else
4068 for_each_thread ([&] (thread_info *thread)
4069 {
4070 send_sigstop (thread, except);
4071 });
4072
4073 wait_for_sigstop ();
4074 stopping_threads = NOT_STOPPING_THREADS;
4075
4076 if (debug_threads)
4077 {
4078 debug_printf ("stop_all_lwps done, setting stopping_threads "
4079 "back to !stopping\n");
4080 debug_exit ();
4081 }
4082 }
4083
4084 /* Enqueue one signal in the chain of signals which need to be
4085 delivered to this process on next resume. */
4086
4087 static void
4088 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4089 {
4090 struct pending_signals *p_sig = XNEW (struct pending_signals);
4091
4092 p_sig->prev = lwp->pending_signals;
4093 p_sig->signal = signal;
4094 if (info == NULL)
4095 memset (&p_sig->info, 0, sizeof (siginfo_t));
4096 else
4097 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4098 lwp->pending_signals = p_sig;
4099 }
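
/* Illustrative sketch (not part of the original source): the chain
   above is pushed at the head, so the oldest entry is found by
   following PREV to the end.  This is how resume_one_lwp_throw,
   below, consumes pending signals in FIFO order.  */

static struct pending_signals *
demo_oldest_pending_signal (struct pending_signals *head)
{
  if (head == NULL)
    return NULL;
  while (head->prev != NULL)
    head = head->prev;
  return head;
}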
4100
4101 void
4102 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4103 {
4104 struct thread_info *thread = get_lwp_thread (lwp);
4105 struct regcache *regcache = get_thread_regcache (thread, 1);
4106
4107 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4108
4109 current_thread = thread;
4110 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4111
4112 for (CORE_ADDR pc : next_pcs)
4113 set_single_step_breakpoint (pc, current_ptid);
4114 }
4115
4116 int
4117 linux_process_target::single_step (lwp_info* lwp)
4118 {
4119 int step = 0;
4120
4121 if (can_hardware_single_step ())
4122 {
4123 step = 1;
4124 }
4125 else if (supports_software_single_step ())
4126 {
4127 install_software_single_step_breakpoints (lwp);
4128 step = 0;
4129 }
4130 else
4131 {
4132 if (debug_threads)
4133 debug_printf ("stepping is not implemented on this target\n");
4134 }
4135
4136 return step;
4137 }
4138
4139 /* The signal can be delivered to the inferior if we are not trying to
4140 finish a fast tracepoint collect. Since a signal can be delivered
4141 during the step-over, the program may enter the signal handler and
4142 trap again after returning from it. We can live with the spurious
4143 double traps. */
4144
4145 static int
4146 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4147 {
4148 return (lwp->collecting_fast_tracepoint
4149 == fast_tpoint_collect_result::not_collecting);
4150 }
4151
4152 void
4153 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4154 int signal, siginfo_t *info)
4155 {
4156 struct thread_info *thread = get_lwp_thread (lwp);
4157 struct thread_info *saved_thread;
4158 int ptrace_request;
4159 struct process_info *proc = get_thread_process (thread);
4160
4161 /* Note that the target description may not be initialised
4162 (proc->tdesc == NULL) at this point, because the program hasn't
4163 stopped at the first instruction yet. This happens while GDBserver
4164 is skipping the extra traps from the wrapper program (see option
4165 --wrapper). Code in this function that requires register access
4166 should be guarded by a proc->tdesc != NULL check or similar. */
4167
4168 if (lwp->stopped == 0)
4169 return;
4170
4171 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4172
4173 fast_tpoint_collect_result fast_tp_collecting
4174 = lwp->collecting_fast_tracepoint;
4175
4176 gdb_assert (!stabilizing_threads
4177 || (fast_tp_collecting
4178 != fast_tpoint_collect_result::not_collecting));
4179
4180 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4181 user used the "jump" command, or "set $pc = foo"). */
4182 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4183 {
4184 /* Collecting 'while-stepping' actions doesn't make sense
4185 anymore. */
4186 release_while_stepping_state_list (thread);
4187 }
4188
4189 /* If we have pending signals or status, and a new signal, enqueue the
4190 signal. Also enqueue the signal if it can't be delivered to the
4191 inferior right now. */
4192 if (signal != 0
4193 && (lwp->status_pending_p
4194 || lwp->pending_signals != NULL
4195 || !lwp_signal_can_be_delivered (lwp)))
4196 {
4197 enqueue_pending_signal (lwp, signal, info);
4198
4199 /* Postpone any pending signal. It was enqueued above. */
4200 signal = 0;
4201 }
4202
4203 if (lwp->status_pending_p)
4204 {
4205 if (debug_threads)
4206 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4207 " has pending status\n",
4208 lwpid_of (thread), step ? "step" : "continue",
4209 lwp->stop_expected ? "expected" : "not expected");
4210 return;
4211 }
4212
4213 saved_thread = current_thread;
4214 current_thread = thread;
4215
4216 /* This bit needs some thinking about. If we get a signal that
4217 we must report while a single-step reinsert is still pending,
4218 we often end up resuming the thread. It might be better to
4219 (ew) allow a stack of pending events; then we could be sure that
4220 the reinsert happened right away and not lose any signals.
4221
4222 Making this stack would also shrink the window in which breakpoints are
4223 uninserted (see comment in linux_wait_for_lwp) but not enough for
4224 complete correctness, so it won't solve that problem. It may be
4225 worthwhile just to solve this one, however. */
4226 if (lwp->bp_reinsert != 0)
4227 {
4228 if (debug_threads)
4229 debug_printf (" pending reinsert at 0x%s\n",
4230 paddress (lwp->bp_reinsert));
4231
4232 if (can_hardware_single_step ())
4233 {
4234 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4235 {
4236 if (step == 0)
4237 warning ("BAD - reinserting but not stepping.");
4238 if (lwp->suspended)
4239 warning ("BAD - reinserting and suspended(%d).",
4240 lwp->suspended);
4241 }
4242 }
4243
4244 step = maybe_hw_step (thread);
4245 }
4246
4247 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4248 {
4249 if (debug_threads)
4250 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4251 " (exit-jump-pad-bkpt)\n",
4252 lwpid_of (thread));
4253 }
4254 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4255 {
4256 if (debug_threads)
4257 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4258 " single-stepping\n",
4259 lwpid_of (thread));
4260
4261 if (can_hardware_single_step ())
4262 step = 1;
4263 else
4264 {
4265 internal_error (__FILE__, __LINE__,
4266 "moving out of jump pad single-stepping"
4267 " not implemented on this target");
4268 }
4269 }
4270
4271 /* If we have while-stepping actions in this thread, set it stepping.
4272 If we have a signal to deliver, it may or may not be set to
4273 SIG_IGN, we don't know. Assume so, and allow collecting
4274 while-stepping into a signal handler. A possible smart thing to
4275 do would be to set an internal breakpoint at the signal return
4276 address, continue, and carry on catching this while-stepping
4277 action only when that breakpoint is hit. A future
4278 enhancement. */
4279 if (thread->while_stepping != NULL)
4280 {
4281 if (debug_threads)
4282 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4283 lwpid_of (thread));
4284
4285 step = single_step (lwp);
4286 }
4287
4288 if (proc->tdesc != NULL && low_supports_breakpoints ())
4289 {
4290 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4291
4292 lwp->stop_pc = low_get_pc (regcache);
4293
4294 if (debug_threads)
4295 {
4296 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4297 (long) lwp->stop_pc);
4298 }
4299 }
4300
4301 /* If we have pending signals, consume one if it can be delivered to
4302 the inferior. */
4303 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4304 {
4305 struct pending_signals **p_sig;
4306
4307 p_sig = &lwp->pending_signals;
4308 while ((*p_sig)->prev != NULL)
4309 p_sig = &(*p_sig)->prev;
4310
4311 signal = (*p_sig)->signal;
4312 if ((*p_sig)->info.si_signo != 0)
4313 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4314 &(*p_sig)->info);
4315
4316 free (*p_sig);
4317 *p_sig = NULL;
4318 }
4319
4320 if (debug_threads)
4321 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4322 lwpid_of (thread), step ? "step" : "continue", signal,
4323 lwp->stop_expected ? "expected" : "not expected");
4324
4325 if (the_low_target.prepare_to_resume != NULL)
4326 the_low_target.prepare_to_resume (lwp);
4327
4328 regcache_invalidate_thread (thread);
4329 errno = 0;
4330 lwp->stepping = step;
4331 if (step)
4332 ptrace_request = PTRACE_SINGLESTEP;
4333 else if (gdb_catching_syscalls_p (lwp))
4334 ptrace_request = PTRACE_SYSCALL;
4335 else
4336 ptrace_request = PTRACE_CONT;
4337 ptrace (ptrace_request,
4338 lwpid_of (thread),
4339 (PTRACE_TYPE_ARG3) 0,
4340 /* Coerce to a uintptr_t first to avoid potential gcc warning
4341 of coercing an 8 byte integer to a 4 byte pointer. */
4342 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4343
4344 current_thread = saved_thread;
4345 if (errno)
4346 perror_with_name ("resuming thread");
4347
4348 /* Successfully resumed. Clear state that no longer makes sense,
4349 and mark the LWP as running. Must not do this before resuming
4350 otherwise if that fails other code will be confused. E.g., we'd
4351 later try to stop the LWP and hang forever waiting for a stop
4352 status. Note that we must not throw after this is cleared,
4353 otherwise handle_zombie_lwp_error would get confused. */
4354 lwp->stopped = 0;
4355 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4356 }
4357
4358 /* Called when we try to resume a stopped LWP and that errors out. If
4359 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4360 or about to become one), discard the error, clear any pending status
4361 the LWP may have, and return true (we'll collect the exit status
4362 soon enough). Otherwise, return false. */
4363
4364 static int
4365 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4366 {
4367 struct thread_info *thread = get_lwp_thread (lp);
4368
4369 /* If we get an error after resuming the LWP successfully, we'd
4370 confuse !T state for the LWP being gone. */
4371 gdb_assert (lp->stopped);
4372
4373 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4374 because even if ptrace failed with ESRCH, the tracee may be "not
4375 yet fully dead", but already refusing ptrace requests. In that
4376 case the tracee has 'R (Running)' state for a little bit
4377 (observed in Linux 3.18). See also the note on ESRCH in the
4378 ptrace(2) man page. Instead, check whether the LWP has any state
4379 other than ptrace-stopped. */
4380
4381 /* Don't assume anything if /proc/PID/status can't be read. */
4382 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4383 {
4384 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4385 lp->status_pending_p = 0;
4386 return 1;
4387 }
4388 return 0;
4389 }
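
/* Illustrative sketch (a simplified, hypothetical stand-in for
   linux_proc_pid_is_trace_stopped_nowarn, with error handling
   omitted): read the State: line of /proc/PID/status and check for
   tracing stop, reported as 't' on current kernels and 'T' on older
   ones.  */

static int
demo_pid_is_trace_stopped (int pid)
{
  char path[64];
  char line[128];
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  FILE *f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:\t", 7) == 0)
      {
	stopped = (line[7] == 't' || line[7] == 'T');
	break;
      }
  fclose (f);
  return stopped;
}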
4390
4391 void
4392 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4393 siginfo_t *info)
4394 {
4395 try
4396 {
4397 resume_one_lwp_throw (lwp, step, signal, info);
4398 }
4399 catch (const gdb_exception_error &ex)
4400 {
4401 if (!check_ptrace_stopped_lwp_gone (lwp))
4402 throw;
4403 }
4404 }
4405
4406 /* This function is called once per thread via for_each_thread.
4407 We look up which resume request applies to THREAD and mark it with a
4408 pointer to the appropriate resume request.
4409
4410 This algorithm is O(threads * resume elements), but the number of
4411 resume elements is small (and will remain small at least until GDB
4412 supports thread suspension). */
4413
4414 static void
4415 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4416 {
4417 struct lwp_info *lwp = get_thread_lwp (thread);
4418
4419 for (int ndx = 0; ndx < n; ndx++)
4420 {
4421 ptid_t ptid = resume[ndx].thread;
4422 if (ptid == minus_one_ptid
4423 || ptid == thread->id
4424 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4425 of PID'. */
4426 || (ptid.pid () == pid_of (thread)
4427 && (ptid.is_pid ()
4428 || ptid.lwp () == -1)))
4429 {
4430 if (resume[ndx].kind == resume_stop
4431 && thread->last_resume_kind == resume_stop)
4432 {
4433 if (debug_threads)
4434 debug_printf ("already %s LWP %ld at GDB's request\n",
4435 (thread->last_status.kind
4436 == TARGET_WAITKIND_STOPPED)
4437 ? "stopped"
4438 : "stopping",
4439 lwpid_of (thread));
4440
4441 continue;
4442 }
4443
4444 /* Ignore (wildcard) resume requests for already-resumed
4445 threads. */
4446 if (resume[ndx].kind != resume_stop
4447 && thread->last_resume_kind != resume_stop)
4448 {
4449 if (debug_threads)
4450 debug_printf ("already %s LWP %ld at GDB's request\n",
4451 (thread->last_resume_kind
4452 == resume_step)
4453 ? "stepping"
4454 : "continuing",
4455 lwpid_of (thread));
4456 continue;
4457 }
4458
4459 /* Don't let wildcard resumes resume fork children that GDB
4460 does not yet know are new fork children. */
4461 if (lwp->fork_relative != NULL)
4462 {
4463 struct lwp_info *rel = lwp->fork_relative;
4464
4465 if (rel->status_pending_p
4466 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4467 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4468 {
4469 if (debug_threads)
4470 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4471 lwpid_of (thread));
4472 continue;
4473 }
4474 }
4475
4476 /* If the thread has a pending event that has already been
4477 reported to GDBserver core, but GDB has not pulled the
4478 event out of the vStopped queue yet, likewise, ignore the
4479 (wildcard) resume request. */
4480 if (in_queued_stop_replies (thread->id))
4481 {
4482 if (debug_threads)
4483 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4484 lwpid_of (thread));
4485 continue;
4486 }
4487
4488 lwp->resume = &resume[ndx];
4489 thread->last_resume_kind = lwp->resume->kind;
4490
4491 lwp->step_range_start = lwp->resume->step_range_start;
4492 lwp->step_range_end = lwp->resume->step_range_end;
4493
4494 /* If we had a deferred signal to report, dequeue one now.
4495 This can happen if LWP gets more than one signal while
4496 trying to get out of a jump pad. */
4497 if (lwp->stopped
4498 && !lwp->status_pending_p
4499 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4500 {
4501 lwp->status_pending_p = 1;
4502
4503 if (debug_threads)
4504 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4505 "leaving status pending.\n",
4506 WSTOPSIG (lwp->status_pending),
4507 lwpid_of (thread));
4508 }
4509
4510 return;
4511 }
4512 }
4513
4514 /* No resume action for this thread. */
4515 lwp->resume = NULL;
4516 }
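
/* Illustrative sketch (not part of the original source): the ptid
   matching rule above, extracted as a standalone predicate.
   minus_one_ptid matches every thread; a bare pPID or pPID.-1
   matches every thread of PID; anything else must match exactly.  */

static bool
demo_resume_request_matches (ptid_t request, ptid_t thread_ptid)
{
  return (request == minus_one_ptid
	  || request == thread_ptid
	  || (request.pid () == thread_ptid.pid ()
	      && (request.is_pid () || request.lwp () == -1)));
}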
4517
4518 bool
4519 linux_process_target::resume_status_pending (thread_info *thread)
4520 {
4521 struct lwp_info *lwp = get_thread_lwp (thread);
4522
4523 /* LWPs which will not be resumed are not interesting, because
4524 we might not wait for them next time through linux_wait. */
4525 if (lwp->resume == NULL)
4526 return false;
4527
4528 return thread_still_has_status_pending (thread);
4529 }
4530
4531 bool
4532 linux_process_target::thread_needs_step_over (thread_info *thread)
4533 {
4534 struct lwp_info *lwp = get_thread_lwp (thread);
4535 struct thread_info *saved_thread;
4536 CORE_ADDR pc;
4537 struct process_info *proc = get_thread_process (thread);
4538
4539 /* GDBserver is skipping the extra traps from the wrapper program,
4540 so there is no need to step over. */
4541 if (proc->tdesc == NULL)
4542 return false;
4543
4544 /* LWPs which will not be resumed are not interesting, because we
4545 might not wait for them next time through linux_wait. */
4546
4547 if (!lwp->stopped)
4548 {
4549 if (debug_threads)
4550 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4551 lwpid_of (thread));
4552 return false;
4553 }
4554
4555 if (thread->last_resume_kind == resume_stop)
4556 {
4557 if (debug_threads)
4558 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4559 " stopped\n",
4560 lwpid_of (thread));
4561 return false;
4562 }
4563
4564 gdb_assert (lwp->suspended >= 0);
4565
4566 if (lwp->suspended)
4567 {
4568 if (debug_threads)
4569 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4570 lwpid_of (thread));
4571 return false;
4572 }
4573
4574 if (lwp->status_pending_p)
4575 {
4576 if (debug_threads)
4577 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4578 " status.\n",
4579 lwpid_of (thread));
4580 return false;
4581 }
4582
4583 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4584 or we have. */
4585 pc = get_pc (lwp);
4586
4587 /* If the PC has changed since we stopped, then don't do anything,
4588 and let the breakpoint/tracepoint be hit. This happens if, for
4589 instance, GDB handled the decr_pc_after_break subtraction itself,
4590 GDB is OOL stepping this thread, or the user has issued a "jump"
4591 command or poked the thread's registers herself. */
4592 if (pc != lwp->stop_pc)
4593 {
4594 if (debug_threads)
4595 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4596 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4597 lwpid_of (thread),
4598 paddress (lwp->stop_pc), paddress (pc));
4599 return false;
4600 }
4601
4602 /* On a software single-step target, resume the inferior with the
4603 signal rather than stepping over. */
4604 if (supports_software_single_step ()
4605 && lwp->pending_signals != NULL
4606 && lwp_signal_can_be_delivered (lwp))
4607 {
4608 if (debug_threads)
4609 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4610 " signals.\n",
4611 lwpid_of (thread));
4612
4613 return false;
4614 }
4615
4616 saved_thread = current_thread;
4617 current_thread = thread;
4618
4619 /* We can only step over breakpoints we know about. */
4620 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4621 {
4622 /* Don't step over a breakpoint that GDB expects to hit
4623 though. If the condition is being evaluated on the target's side
4624 and it evaluates to false, step over this breakpoint as well. */
4625 if (gdb_breakpoint_here (pc)
4626 && gdb_condition_true_at_breakpoint (pc)
4627 && gdb_no_commands_at_breakpoint (pc))
4628 {
4629 if (debug_threads)
4630 debug_printf ("Need step over [LWP %ld]? yes, but found"
4631 " GDB breakpoint at 0x%s; skipping step over\n",
4632 lwpid_of (thread), paddress (pc));
4633
4634 current_thread = saved_thread;
4635 return false;
4636 }
4637 else
4638 {
4639 if (debug_threads)
4640 debug_printf ("Need step over [LWP %ld]? yes, "
4641 "found breakpoint at 0x%s\n",
4642 lwpid_of (thread), paddress (pc));
4643
4644 /* We've found an lwp that needs stepping over --- return true so
4645 that find_thread stops looking. */
4646 current_thread = saved_thread;
4647
4648 return true;
4649 }
4650 }
4651
4652 current_thread = saved_thread;
4653
4654 if (debug_threads)
4655 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4656 " at 0x%s\n",
4657 lwpid_of (thread), paddress (pc));
4658
4659 return false;
4660 }
4661
4662 void
4663 linux_process_target::start_step_over (lwp_info *lwp)
4664 {
4665 struct thread_info *thread = get_lwp_thread (lwp);
4666 struct thread_info *saved_thread;
4667 CORE_ADDR pc;
4668 int step;
4669
4670 if (debug_threads)
4671 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4672 lwpid_of (thread));
4673
4674 stop_all_lwps (1, lwp);
4675
4676 if (lwp->suspended != 0)
4677 {
4678 internal_error (__FILE__, __LINE__,
4679 "LWP %ld suspended=%d\n", lwpid_of (thread),
4680 lwp->suspended);
4681 }
4682
4683 if (debug_threads)
4684 debug_printf ("Done stopping all threads for step-over.\n");
4685
4686 /* Note, we should always reach here with an already adjusted PC,
4687 either by GDB (if we're resuming due to GDB's request), or by our
4688 caller, if we just finished handling an internal breakpoint GDB
4689 shouldn't care about. */
4690 pc = get_pc (lwp);
4691
4692 saved_thread = current_thread;
4693 current_thread = thread;
4694
4695 lwp->bp_reinsert = pc;
4696 uninsert_breakpoints_at (pc);
4697 uninsert_fast_tracepoint_jumps_at (pc);
4698
4699 step = single_step (lwp);
4700
4701 current_thread = saved_thread;
4702
4703 resume_one_lwp (lwp, step, 0, NULL);
4704
4705 /* Require next event from this LWP. */
4706 step_over_bkpt = thread->id;
4707 }
4708
4709 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4710 start_step_over, if still there, and delete any single-step
4711 breakpoints we've set, on non-hardware single-step targets. */
4712
4713 static int
4714 finish_step_over (struct lwp_info *lwp)
4715 {
4716 if (lwp->bp_reinsert != 0)
4717 {
4718 struct thread_info *saved_thread = current_thread;
4719
4720 if (debug_threads)
4721 debug_printf ("Finished step over.\n");
4722
4723 current_thread = get_lwp_thread (lwp);
4724
4725 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4726 may be no breakpoint to reinsert there by now. */
4727 reinsert_breakpoints_at (lwp->bp_reinsert);
4728 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4729
4730 lwp->bp_reinsert = 0;
4731
4732 /* Delete any single-step breakpoints. No longer needed. We
4733 don't have to worry about other threads hitting this trap,
4734 and later not being able to explain it, because we were
4735 stepping over a breakpoint, and we hold all threads but
4736 LWP stopped while doing that. */
4737 if (!can_hardware_single_step ())
4738 {
4739 gdb_assert (has_single_step_breakpoints (current_thread));
4740 delete_single_step_breakpoints (current_thread);
4741 }
4742
4743 step_over_bkpt = null_ptid;
4744 current_thread = saved_thread;
4745 return 1;
4746 }
4747 else
4748 return 0;
4749 }
4750
4751 void
4752 linux_process_target::complete_ongoing_step_over ()
4753 {
4754 if (step_over_bkpt != null_ptid)
4755 {
4756 struct lwp_info *lwp;
4757 int wstat;
4758 int ret;
4759
4760 if (debug_threads)
4761 debug_printf ("detach: step over in progress, finish it first\n");
4762
4763 /* Passing NULL_PTID as filter indicates we want all events to
4764 be left pending. Eventually this returns when there are no
4765 unwaited-for children left. */
4766 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4767 __WALL);
4768 gdb_assert (ret == -1);
4769
4770 lwp = find_lwp_pid (step_over_bkpt);
4771 if (lwp != NULL)
4772 finish_step_over (lwp);
4773 step_over_bkpt = null_ptid;
4774 unsuspend_all_lwps (lwp);
4775 }
4776 }
4777
4778 void
4779 linux_process_target::resume_one_thread (thread_info *thread,
4780 bool leave_all_stopped)
4781 {
4782 struct lwp_info *lwp = get_thread_lwp (thread);
4783 int leave_pending;
4784
4785 if (lwp->resume == NULL)
4786 return;
4787
4788 if (lwp->resume->kind == resume_stop)
4789 {
4790 if (debug_threads)
4791 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4792
4793 if (!lwp->stopped)
4794 {
4795 if (debug_threads)
4796 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4797
4798 /* Stop the thread, and wait for the event asynchronously,
4799 through the event loop. */
4800 send_sigstop (lwp);
4801 }
4802 else
4803 {
4804 if (debug_threads)
4805 debug_printf ("already stopped LWP %ld\n",
4806 lwpid_of (thread));
4807
4808 /* The LWP may have been stopped in an internal event that
4809 was not meant to be notified back to GDB (e.g., gdbserver
4810 breakpoint), so we should be reporting a stop event in
4811 this case too. */
4812
4813 /* If the thread already has a pending SIGSTOP, this is a
4814 no-op. Otherwise, something later will presumably resume
4815 the thread and this will cause it to cancel any pending
4816 operation, due to last_resume_kind == resume_stop. If
4817 the thread already has a pending status to report, we
4818 will still report it the next time we wait - see
4819 status_pending_p_callback. */
4820
4821 /* If we already have a pending signal to report, then
4822 there's no need to queue a SIGSTOP, as this means we're
4823 midway through moving the LWP out of the jumppad, and we
4824 will report the pending signal as soon as that is
4825 finished. */
4826 if (lwp->pending_signals_to_report == NULL)
4827 send_sigstop (lwp);
4828 }
4829
4830 /* For stop requests, we're done. */
4831 lwp->resume = NULL;
4832 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4833 return;
4834 }
4835
4836 /* If this thread which is about to be resumed has a pending status,
4837 then don't resume it - we can just report the pending status.
4838 Likewise if it is suspended, because e.g., another thread is
4839 stepping past a breakpoint. Make sure to queue any signals that
4840 would otherwise be sent. In all-stop mode, we make this decision
4841 based on whether *any* thread has a pending status. If there's a
4842 thread that needs the step-over-breakpoint dance, then don't
4843 resume any other thread but that particular one. */
4844 leave_pending = (lwp->suspended
4845 || lwp->status_pending_p
4846 || leave_all_stopped);
4847
4848 /* If we have a new signal, enqueue the signal. */
4849 if (lwp->resume->sig != 0)
4850 {
4851 siginfo_t info, *info_p;
4852
4853 /* If this is the same signal we were previously stopped by,
4854 make sure to queue its siginfo. */
4855 if (WIFSTOPPED (lwp->last_status)
4856 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4857 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4858 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4859 info_p = &info;
4860 else
4861 info_p = NULL;
4862
4863 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4864 }
4865
4866 if (!leave_pending)
4867 {
4868 if (debug_threads)
4869 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4870
4871 proceed_one_lwp (thread, NULL);
4872 }
4873 else
4874 {
4875 if (debug_threads)
4876 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4877 }
4878
4879 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4880 lwp->resume = NULL;
4881 }
4882
4883 void
4884 linux_process_target::resume (thread_resume *resume_info, size_t n)
4885 {
4886 struct thread_info *need_step_over = NULL;
4887
4888 if (debug_threads)
4889 {
4890 debug_enter ();
4891 debug_printf ("linux_resume:\n");
4892 }
4893
4894 for_each_thread ([&] (thread_info *thread)
4895 {
4896 linux_set_resume_request (thread, resume_info, n);
4897 });
4898
4899 /* If there is a thread which would otherwise be resumed, which has
4900 a pending status, then don't resume any threads - we can just
4901 report the pending status. Make sure to queue any signals that
4902 would otherwise be sent. In non-stop mode, we'll apply this
4903 logic to each thread individually. We consume all pending events
4904 before considering to start a step-over (in all-stop). */
4905 bool any_pending = false;
4906 if (!non_stop)
4907 any_pending = find_thread ([this] (thread_info *thread)
4908 {
4909 return resume_status_pending (thread);
4910 }) != nullptr;
4911
4912 /* If there is a thread which would otherwise be resumed, which is
4913 stopped at a breakpoint that needs stepping over, then don't
4914 resume any threads - have it step over the breakpoint with all
4915 other threads stopped, then resume all threads again. Make sure
4916 to queue any signals that would otherwise be delivered or
4917 queued. */
4918 if (!any_pending && low_supports_breakpoints ())
4919 need_step_over = find_thread ([this] (thread_info *thread)
4920 {
4921 return thread_needs_step_over (thread);
4922 });
4923
4924 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4925
4926 if (debug_threads)
4927 {
4928 if (need_step_over != NULL)
4929 debug_printf ("Not resuming all, need step over\n");
4930 else if (any_pending)
4931 debug_printf ("Not resuming, all-stop and found "
4932 "an LWP with pending status\n");
4933 else
4934 debug_printf ("Resuming, no pending status or step over needed\n");
4935 }
4936
4937 /* Even if we're leaving threads stopped, queue all signals we'd
4938 otherwise deliver. */
4939 for_each_thread ([&] (thread_info *thread)
4940 {
4941 resume_one_thread (thread, leave_all_stopped);
4942 });
4943
4944 if (need_step_over)
4945 start_step_over (get_thread_lwp (need_step_over));
4946
4947 if (debug_threads)
4948 {
4949 debug_printf ("linux_resume done\n");
4950 debug_exit ();
4951 }
4952
4953 /* We may have events that were pending that can/should be sent to
4954 the client now. Trigger a linux_wait call. */
4955 if (target_is_async_p ())
4956 async_file_mark ();
4957 }
4958
4959 void
4960 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4961 {
4962 struct lwp_info *lwp = get_thread_lwp (thread);
4963 int step;
4964
4965 if (lwp == except)
4966 return;
4967
4968 if (debug_threads)
4969 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4970
4971 if (!lwp->stopped)
4972 {
4973 if (debug_threads)
4974 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4975 return;
4976 }
4977
4978 if (thread->last_resume_kind == resume_stop
4979 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4980 {
4981 if (debug_threads)
4982 debug_printf (" client wants LWP %ld to remain stopped\n",
4983 lwpid_of (thread));
4984 return;
4985 }
4986
4987 if (lwp->status_pending_p)
4988 {
4989 if (debug_threads)
4990 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4991 lwpid_of (thread));
4992 return;
4993 }
4994
4995 gdb_assert (lwp->suspended >= 0);
4996
4997 if (lwp->suspended)
4998 {
4999 if (debug_threads)
5000 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5001 return;
5002 }
5003
5004 if (thread->last_resume_kind == resume_stop
5005 && lwp->pending_signals_to_report == NULL
5006 && (lwp->collecting_fast_tracepoint
5007 == fast_tpoint_collect_result::not_collecting))
5008 {
5009 /* We haven't reported this LWP as stopped yet (otherwise, the
5010 last_status.kind check above would catch it, and we wouldn't
5011 reach here). This LWP may have been momentarily paused by a
5012 stop_all_lwps call while handling for example, another LWP's
5013 step-over. In that case, the pending expected SIGSTOP signal
5014 that was queued at vCont;t handling time will have already
5015 been consumed by wait_for_sigstop, and so we need to requeue
5016 another one here. Note that if the LWP already has a SIGSTOP
5017 pending, this is a no-op. */
5018
5019 if (debug_threads)
5020 debug_printf ("Client wants LWP %ld to stop. "
5021 "Making sure it has a SIGSTOP pending\n",
5022 lwpid_of (thread));
5023
5024 send_sigstop (lwp);
5025 }
5026
5027 if (thread->last_resume_kind == resume_step)
5028 {
5029 if (debug_threads)
5030 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5031 lwpid_of (thread));
5032
5033 /* If resume_step is requested by GDB, install single-step
5034 breakpoints when the thread is about to be actually resumed if
5035 the single-step breakpoints weren't removed. */
5036 if (supports_software_single_step ()
5037 && !has_single_step_breakpoints (thread))
5038 install_software_single_step_breakpoints (lwp);
5039
5040 step = maybe_hw_step (thread);
5041 }
5042 else if (lwp->bp_reinsert != 0)
5043 {
5044 if (debug_threads)
5045 debug_printf (" stepping LWP %ld, reinsert set\n",
5046 lwpid_of (thread));
5047
5048 step = maybe_hw_step (thread);
5049 }
5050 else
5051 step = 0;
5052
5053 resume_one_lwp (lwp, step, 0, NULL);
5054 }
5055
5056 void
5057 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5058 lwp_info *except)
5059 {
5060 struct lwp_info *lwp = get_thread_lwp (thread);
5061
5062 if (lwp == except)
5063 return;
5064
5065 lwp_suspended_decr (lwp);
5066
5067 proceed_one_lwp (thread, except);
5068 }
5069
5070 void
5071 linux_process_target::proceed_all_lwps ()
5072 {
5073 struct thread_info *need_step_over;
5074
5075 /* If there is a thread which would otherwise be resumed, which is
5076 stopped at a breakpoint that needs stepping over, then don't
5077 resume any threads - have it step over the breakpoint with all
5078 other threads stopped, then resume all threads again. */
5079
5080 if (low_supports_breakpoints ())
5081 {
5082 need_step_over = find_thread ([this] (thread_info *thread)
5083 {
5084 return thread_needs_step_over (thread);
5085 });
5086
5087 if (need_step_over != NULL)
5088 {
5089 if (debug_threads)
5090 debug_printf ("proceed_all_lwps: found "
5091 "thread %ld needing a step-over\n",
5092 lwpid_of (need_step_over));
5093
5094 start_step_over (get_thread_lwp (need_step_over));
5095 return;
5096 }
5097 }
5098
5099 if (debug_threads)
5100 debug_printf ("Proceeding, no step-over needed\n");
5101
5102 for_each_thread ([this] (thread_info *thread)
5103 {
5104 proceed_one_lwp (thread, NULL);
5105 });
5106 }
5107
5108 void
5109 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5110 {
5111 if (debug_threads)
5112 {
5113 debug_enter ();
5114 if (except)
5115 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5116 lwpid_of (get_lwp_thread (except)));
5117 else
5118 debug_printf ("unstopping all lwps\n");
5119 }
5120
5121 if (unsuspend)
5122 for_each_thread ([&] (thread_info *thread)
5123 {
5124 unsuspend_and_proceed_one_lwp (thread, except);
5125 });
5126 else
5127 for_each_thread ([&] (thread_info *thread)
5128 {
5129 proceed_one_lwp (thread, except);
5130 });
5131
5132 if (debug_threads)
5133 {
5134 debug_printf ("unstop_all_lwps done\n");
5135 debug_exit ();
5136 }
5137 }
5138
5139
5140 #ifdef HAVE_LINUX_REGSETS
5141
5142 #define use_linux_regsets 1
5143
5144 /* Returns true if REGSET has been disabled. */
5145
5146 static int
5147 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5148 {
5149 return (info->disabled_regsets != NULL
5150 && info->disabled_regsets[regset - info->regsets]);
5151 }
5152
5153 /* Disable REGSET. */
5154
5155 static void
5156 disable_regset (struct regsets_info *info, struct regset_info *regset)
5157 {
5158 int dr_offset;
5159
5160 dr_offset = regset - info->regsets;
5161 if (info->disabled_regsets == NULL)
5162 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5163 info->disabled_regsets[dr_offset] = 1;
5164 }
5165
5166 static int
5167 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5168 struct regcache *regcache)
5169 {
5170 struct regset_info *regset;
5171 int saw_general_regs = 0;
5172 int pid;
5173 struct iovec iov;
5174
5175 pid = lwpid_of (current_thread);
5176 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5177 {
5178 void *buf, *data;
5179 int nt_type, res;
5180
5181 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5182 continue;
5183
5184 buf = xmalloc (regset->size);
5185
5186 nt_type = regset->nt_type;
5187 if (nt_type)
5188 {
5189 iov.iov_base = buf;
5190 iov.iov_len = regset->size;
5191 data = (void *) &iov;
5192 }
5193 else
5194 data = buf;
5195
5196 #ifndef __sparc__
5197 res = ptrace (regset->get_request, pid,
5198 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5199 #else
5200 res = ptrace (regset->get_request, pid, data, nt_type);
5201 #endif
5202 if (res < 0)
5203 {
5204 if (errno == EIO
5205 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5206 {
5207 /* If we get EIO on a regset, or an EINVAL and the regset is
5208 optional, do not try it again for this process mode. */
5209 disable_regset (regsets_info, regset);
5210 }
5211 else if (errno == ENODATA)
5212 {
5213 /* ENODATA may be returned if the regset is currently
5214 not "active". This can happen in normal operation,
5215 so suppress the warning in this case. */
5216 }
5217 else if (errno == ESRCH)
5218 {
5219 /* At this point, ESRCH should mean the process is
5220 already gone, in which case we simply ignore attempts
5221 to read its registers. */
5222 }
5223 else
5224 {
5225 char s[256];
5226 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5227 pid);
5228 perror (s);
5229 }
5230 }
5231 else
5232 {
5233 if (regset->type == GENERAL_REGS)
5234 saw_general_regs = 1;
5235 regset->store_function (regcache, buf);
5236 }
5237 free (buf);
5238 }
5239 if (saw_general_regs)
5240 return 0;
5241 else
5242 return 1;
5243 }
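
/* Illustrative sketch (not part of the original source): the
   iovec-based transfer used above when a regset has a note type.
   With PTRACE_GETREGSET, the third argument selects the regset
   (e.g. NT_PRSTATUS for the general registers) and the fourth points
   at an iovec describing the buffer; the kernel trims iov_len to the
   amount actually filled in.  buf and buf_size are hypothetical.  */

static long
demo_fetch_gregset (int tid, void *buf, size_t buf_size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = buf_size;
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}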
5244
5245 static int
5246 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5247 struct regcache *regcache)
5248 {
5249 struct regset_info *regset;
5250 int saw_general_regs = 0;
5251 int pid;
5252 struct iovec iov;
5253
5254 pid = lwpid_of (current_thread);
5255 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5256 {
5257 void *buf, *data;
5258 int nt_type, res;
5259
5260 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5261 || regset->fill_function == NULL)
5262 continue;
5263
5264 buf = xmalloc (regset->size);
5265
5266 /* First fill the buffer with the current register set contents,
5267 in case there are any items in the kernel's regset that are
5268 not in gdbserver's regcache. */
5269
5270 nt_type = regset->nt_type;
5271 if (nt_type)
5272 {
5273 iov.iov_base = buf;
5274 iov.iov_len = regset->size;
5275 data = (void *) &iov;
5276 }
5277 else
5278 data = buf;
5279
5280 #ifndef __sparc__
5281 res = ptrace (regset->get_request, pid,
5282 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5283 #else
5284 res = ptrace (regset->get_request, pid, data, nt_type);
5285 #endif
5286
5287 if (res == 0)
5288 {
5289 /* Then overlay our cached registers on that. */
5290 regset->fill_function (regcache, buf);
5291
5292 /* Only now do we write the register set. */
5293 #ifndef __sparc__
5294 res = ptrace (regset->set_request, pid,
5295 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5296 #else
5297 res = ptrace (regset->set_request, pid, data, nt_type);
5298 #endif
5299 }
5300
5301 if (res < 0)
5302 {
5303 if (errno == EIO
5304 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5305 {
5306 /* If we get EIO on a regset, or an EINVAL and the regset is
5307 optional, do not try it again for this process mode. */
5308 disable_regset (regsets_info, regset);
5309 }
5310 else if (errno == ESRCH)
5311 {
5312 /* At this point, ESRCH should mean the process is
5313 already gone, in which case we simply ignore attempts
5314 to change its registers. See also the related
5315 comment in resume_one_lwp. */
5316 free (buf);
5317 return 0;
5318 }
5319 else
5320 {
5321 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5322 }
5323 }
5324 else if (regset->type == GENERAL_REGS)
5325 saw_general_regs = 1;
5326 free (buf);
5327 }
5328 if (saw_general_regs)
5329 return 0;
5330 else
5331 return 1;
5332 }
5333
5334 #else /* !HAVE_LINUX_REGSETS */
5335
5336 #define use_linux_regsets 0
5337 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5338 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5339
5340 #endif
5341
5342 /* Return 1 if register REGNO is supported by one of the regset ptrace
5343 calls or 0 if it has to be transferred individually. */
5344
5345 static int
5346 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5347 {
5348 unsigned char mask = 1 << (regno % 8);
5349 size_t index = regno / 8;
5350
5351 return (use_linux_regsets
5352 && (regs_info->regset_bitmap == NULL
5353 || (regs_info->regset_bitmap[index] & mask) != 0));
5354 }
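
/* Worked example (not part of the original source): REGNO 10 lands
   at INDEX 1 with MASK 1 << 2 == 0x04, so it is regset-transferable
   iff bit 2 of regset_bitmap[1] is set, or the bitmap is NULL, which
   means "all registers".  */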
5355
5356 #ifdef HAVE_LINUX_USRREGS
5357
5358 static int
5359 register_addr (const struct usrregs_info *usrregs, int regnum)
5360 {
5361 int addr;
5362
5363 if (regnum < 0 || regnum >= usrregs->num_regs)
5364 error ("Invalid register number %d.", regnum);
5365
5366 addr = usrregs->regmap[regnum];
5367
5368 return addr;
5369 }
5370
5371
5372 void
5373 linux_process_target::fetch_register (const usrregs_info *usrregs,
5374 regcache *regcache, int regno)
5375 {
5376 CORE_ADDR regaddr;
5377 int i, size;
5378 char *buf;
5379 int pid;
5380
5381 if (regno >= usrregs->num_regs)
5382 return;
5383 if (low_cannot_fetch_register (regno))
5384 return;
5385
5386 regaddr = register_addr (usrregs, regno);
5387 if (regaddr == -1)
5388 return;
5389
5390 size = ((register_size (regcache->tdesc, regno)
5391 + sizeof (PTRACE_XFER_TYPE) - 1)
5392 & -sizeof (PTRACE_XFER_TYPE));
5393 buf = (char *) alloca (size);
5394
5395 pid = lwpid_of (current_thread);
5396 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5397 {
5398 errno = 0;
5399 *(PTRACE_XFER_TYPE *) (buf + i) =
5400 ptrace (PTRACE_PEEKUSER, pid,
5401 /* Coerce to a uintptr_t first to avoid potential gcc warning
5402 of coercing an 8 byte integer to a 4 byte pointer. */
5403 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5404 regaddr += sizeof (PTRACE_XFER_TYPE);
5405 if (errno != 0)
5406 {
5407 /* Mark register REGNO unavailable. */
5408 supply_register (regcache, regno, NULL);
5409 return;
5410 }
5411 }
5412
5413 low_supply_ptrace_register (regcache, regno, buf);
5414 }
5415
5416 void
5417 linux_process_target::store_register (const usrregs_info *usrregs,
5418 regcache *regcache, int regno)
5419 {
5420 CORE_ADDR regaddr;
5421 int i, size;
5422 char *buf;
5423 int pid;
5424
5425 if (regno >= usrregs->num_regs)
5426 return;
5427 if (low_cannot_store_register (regno))
5428 return;
5429
5430 regaddr = register_addr (usrregs, regno);
5431 if (regaddr == -1)
5432 return;
5433
5434 size = ((register_size (regcache->tdesc, regno)
5435 + sizeof (PTRACE_XFER_TYPE) - 1)
5436 & -sizeof (PTRACE_XFER_TYPE));
5437 buf = (char *) alloca (size);
5438 memset (buf, 0, size);
5439
5440 low_collect_ptrace_register (regcache, regno, buf);
5441
5442 pid = lwpid_of (current_thread);
5443 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5444 {
5445 errno = 0;
5446 ptrace (PTRACE_POKEUSER, pid,
5447 /* Coerce to a uintptr_t first to avoid potential gcc warning
5448 about coercing an 8 byte integer to a 4 byte pointer. */
5449 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5450 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5451 if (errno != 0)
5452 {
5453 /* At this point, ESRCH should mean the process is
5454 already gone, in which case we simply ignore attempts
5455 to change its registers. See also the related
5456 comment in resume_one_lwp. */
5457 if (errno == ESRCH)
5458 return;
5459
5460
5461 if (!low_cannot_store_register (regno))
5462 error ("writing register %d: %s", regno, safe_strerror (errno));
5463 }
5464 regaddr += sizeof (PTRACE_XFER_TYPE);
5465 }
5466 }
5467 #endif /* HAVE_LINUX_USRREGS */
5468
5469 void
5470 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5471 int regno, char *buf)
5472 {
5473 collect_register (regcache, regno, buf);
5474 }
5475
5476 void
5477 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5478 int regno, const char *buf)
5479 {
5480 supply_register (regcache, regno, buf);
5481 }
5482
5483 void
5484 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5485 regcache *regcache,
5486 int regno, int all)
5487 {
5488 #ifdef HAVE_LINUX_USRREGS
5489 struct usrregs_info *usr = regs_info->usrregs;
5490
5491 if (regno == -1)
5492 {
5493 for (regno = 0; regno < usr->num_regs; regno++)
5494 if (all || !linux_register_in_regsets (regs_info, regno))
5495 fetch_register (usr, regcache, regno);
5496 }
5497 else
5498 fetch_register (usr, regcache, regno);
5499 #endif
5500 }
5501
5502 void
5503 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5504 regcache *regcache,
5505 int regno, int all)
5506 {
5507 #ifdef HAVE_LINUX_USRREGS
5508 struct usrregs_info *usr = regs_info->usrregs;
5509
5510 if (regno == -1)
5511 {
5512 for (regno = 0; regno < usr->num_regs; regno++)
5513 if (all || !linux_register_in_regsets (regs_info, regno))
5514 store_register (usr, regcache, regno);
5515 }
5516 else
5517 store_register (usr, regcache, regno);
5518 #endif
5519 }
5520
5521 void
5522 linux_process_target::fetch_registers (regcache *regcache, int regno)
5523 {
5524 int use_regsets;
5525 int all = 0;
5526 const regs_info *regs_info = get_regs_info ();
5527
5528 if (regno == -1)
5529 {
5530 if (regs_info->usrregs != NULL)
5531 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5532 low_fetch_register (regcache, regno);
5533
5534 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5535 if (regs_info->usrregs != NULL)
5536 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5537 }
5538 else
5539 {
5540 if (low_fetch_register (regcache, regno))
5541 return;
5542
5543 use_regsets = linux_register_in_regsets (regs_info, regno);
5544 if (use_regsets)
5545 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5546 regcache);
5547 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5548 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5549 }
5550 }
5551
5552 void
5553 linux_process_target::store_registers (regcache *regcache, int regno)
5554 {
5555 int use_regsets;
5556 int all = 0;
5557 const regs_info *regs_info = get_regs_info ();
5558
5559 if (regno == -1)
5560 {
5561 all = regsets_store_inferior_registers (regs_info->regsets_info,
5562 regcache);
5563 if (regs_info->usrregs != NULL)
5564 usr_store_inferior_registers (regs_info, regcache, regno, all);
5565 }
5566 else
5567 {
5568 use_regsets = linux_register_in_regsets (regs_info, regno);
5569 if (use_regsets)
5570 all = regsets_store_inferior_registers (regs_info->regsets_info,
5571 regcache);
5572 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5573 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5574 }
5575 }
5576
5577 bool
5578 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5579 {
5580 return false;
5581 }
5582
5583 /* A wrapper for the read_memory target op. */
5584
5585 static int
5586 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5587 {
5588 return the_target->read_memory (memaddr, myaddr, len);
5589 }
5590
5591 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5592 to debugger memory starting at MYADDR. */
5593
5594 int
5595 linux_process_target::read_memory (CORE_ADDR memaddr,
5596 unsigned char *myaddr, int len)
5597 {
5598 int pid = lwpid_of (current_thread);
5599 PTRACE_XFER_TYPE *buffer;
5600 CORE_ADDR addr;
5601 int count;
5602 char filename[64];
5603 int i;
5604 int ret;
5605 int fd;
5606
5607 /* Try using /proc. Don't bother for one word. */
5608 if (len >= 3 * sizeof (long))
5609 {
5610 int bytes;
5611
5612 /* We could keep this file open and cache it - possibly one per
5613 thread. That requires some juggling, but is even faster. */
5614 sprintf (filename, "/proc/%d/mem", pid);
5615 fd = open (filename, O_RDONLY | O_LARGEFILE);
5616 if (fd == -1)
5617 goto no_proc;
5618
5619 /* If pread64 is available, use it. It's faster if the kernel
5620 supports it (only one syscall), and it's 64-bit safe even on
5621 32-bit platforms (for instance, SPARC debugging a SPARC64
5622 application). */
5623 #ifdef HAVE_PREAD64
5624 bytes = pread64 (fd, myaddr, len, memaddr);
5625 #else
5626 bytes = -1;
5627 if (lseek (fd, memaddr, SEEK_SET) != -1)
5628 bytes = read (fd, myaddr, len);
5629 #endif
5630
5631 close (fd);
5632 if (bytes == len)
5633 return 0;
5634
5635 /* Some data was read; we'll try to get the rest with ptrace. */
5636 if (bytes > 0)
5637 {
5638 memaddr += bytes;
5639 myaddr += bytes;
5640 len -= bytes;
5641 }
5642 }
5643
5644 no_proc:
5645 /* Round starting address down to longword boundary. */
5646 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5647 /* Round ending address up; get number of longwords that makes. */
5648 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5649 / sizeof (PTRACE_XFER_TYPE));
5650 /* Allocate buffer of that many longwords. */
5651 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5652
5653 /* Read all the longwords. */
5654 errno = 0;
5655 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5656 {
5657 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5658 about coercing an 8 byte integer to a 4 byte pointer. */
5659 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5660 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5661 (PTRACE_TYPE_ARG4) 0);
5662 if (errno)
5663 break;
5664 }
5665 ret = errno;
5666
5667 /* Copy appropriate bytes out of the buffer. */
5668 if (i > 0)
5669 {
5670 i *= sizeof (PTRACE_XFER_TYPE);
5671 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5672 memcpy (myaddr,
5673 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5674 i < len ? i : len);
5675 }
5676
5677 return ret;
5678 }
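
/* Illustrative sketch (not part of the target interface): the /proc
 fast path above reduces to roughly the following, assuming PID names
 a ptrace-stopped tracee and pread64 is available.

 char path[64];
 ssize_t n = -1;
 xsnprintf (path, sizeof path, "/proc/%d/mem", pid);
 int fd = open (path, O_RDONLY | O_LARGEFILE);
 if (fd >= 0)
 {
 n = pread64 (fd, myaddr, len, (off_t) memaddr);
 close (fd);
 }

 If N != LEN afterwards - a short read or an error - the remaining
 bytes are fetched a word at a time with PTRACE_PEEKTEXT, exactly as
 the fallback path above does. */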
5679
5680 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5681 memory at MEMADDR. On failure (cannot write to the inferior)
5682 returns the value of errno. Always succeeds if LEN is zero. */
5683
5684 int
5685 linux_process_target::write_memory (CORE_ADDR memaddr,
5686 const unsigned char *myaddr, int len)
5687 {
5688 int i;
5689 /* Round starting address down to longword boundary. */
5690 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5691 /* Round ending address up; get number of longwords that makes. */
5692 int count
5693 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5694 / sizeof (PTRACE_XFER_TYPE);
5695
5696 /* Allocate buffer of that many longwords. */
5697 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5698
5699 int pid = lwpid_of (current_thread);
5700
5701 if (len == 0)
5702 {
5703 /* Zero length write always succeeds. */
5704 return 0;
5705 }
5706
5707 if (debug_threads)
5708 {
5709 /* Dump up to four bytes. */
5710 char str[4 * 2 + 1];
5711 char *p = str;
5712 int dump = len < 4 ? len : 4;
5713
5714 for (i = 0; i < dump; i++)
5715 {
5716 sprintf (p, "%02x", myaddr[i]);
5717 p += 2;
5718 }
5719 *p = '\0';
5720
5721 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5722 str, (long) memaddr, pid);
5723 }
5724
5725 /* Fill start and end extra bytes of buffer with existing memory data. */
5726
5727 errno = 0;
5728 /* Coerce the 3rd arg to a uintptr_t first to avoid a potential gcc
5729 warning about coercing an 8-byte integer to a 4-byte pointer. */
5730 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5731 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5732 (PTRACE_TYPE_ARG4) 0);
5733 if (errno)
5734 return errno;
5735
5736 if (count > 1)
5737 {
5738 errno = 0;
5739 buffer[count - 1]
5740 = ptrace (PTRACE_PEEKTEXT, pid,
5741 /* Coerce to a uintptr_t first to avoid a potential gcc warning
5742 about coercing an 8-byte integer to a 4-byte pointer. */
5743 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5744 * sizeof (PTRACE_XFER_TYPE)),
5745 (PTRACE_TYPE_ARG4) 0);
5746 if (errno)
5747 return errno;
5748 }
5749
5750 /* Copy data to be written over corresponding part of buffer. */
5751
5752 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5753 myaddr, len);
5754
5755 /* Write the entire buffer. */
5756
5757 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5758 {
5759 errno = 0;
5760 ptrace (PTRACE_POKETEXT, pid,
5761 /* Coerce to a uintptr_t first to avoid a potential gcc warning
5762 about coercing an 8-byte integer to a 4-byte pointer. */
5763 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5764 (PTRACE_TYPE_ARG4) buffer[i]);
5765 if (errno)
5766 return errno;
5767 }
5768
5769 return 0;
5770 }
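
/* Illustrative sketch of the alignment dance above: writing the two
 bytes "ab" starting one byte past a word boundary, with a 4-byte
 PTRACE_XFER_TYPE, touches exactly one word and preserves its other
 bytes via read-modify-write (little-endian byte layout is assumed
 here, purely for the example):

 long word = ptrace (PTRACE_PEEKTEXT, pid, waddr, 0); // 0xDDCCBBAA
 word = (word & ~0x00FFFF00L) | ('a' << 8) | ('b' << 16);
 ptrace (PTRACE_POKETEXT, pid, waddr, word);

 Only the first and last words of a span ever need this treatment;
 interior words are simply overwritten whole. */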
5771
5772 void
5773 linux_process_target::look_up_symbols ()
5774 {
5775 #ifdef USE_THREAD_DB
5776 struct process_info *proc = current_process ();
5777
5778 if (proc->priv->thread_db != NULL)
5779 return;
5780
5781 thread_db_init ();
5782 #endif
5783 }
5784
5785 void
5786 linux_process_target::request_interrupt ()
5787 {
5788 /* Send a SIGINT to the process group. This acts just as if the user
5789 had typed a ^C on the controlling terminal. */
5790 ::kill (-signal_pid, SIGINT);
5791 }
5792
5793 bool
5794 linux_process_target::supports_read_auxv ()
5795 {
5796 return true;
5797 }
5798
5799 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5800 to debugger memory starting at MYADDR. */
5801
5802 int
5803 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5804 unsigned int len)
5805 {
5806 char filename[PATH_MAX];
5807 int fd, n;
5808 int pid = lwpid_of (current_thread);
5809
5810 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5811
5812 fd = open (filename, O_RDONLY);
5813 if (fd < 0)
5814 return -1;
5815
5816 if (offset != (CORE_ADDR) 0
5817 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5818 n = -1;
5819 else
5820 n = read (fd, myaddr, len);
5821
5822 close (fd);
5823
5824 return n;
5825 }
5826
5827 int
5828 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5829 int size, raw_breakpoint *bp)
5830 {
5831 if (type == raw_bkpt_type_sw)
5832 return insert_memory_breakpoint (bp);
5833 else
5834 return low_insert_point (type, addr, size, bp);
5835 }
5836
5837 int
5838 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5839 int size, raw_breakpoint *bp)
5840 {
5841 /* Unsupported (see target.h). */
5842 return 1;
5843 }
5844
5845 int
5846 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5847 int size, raw_breakpoint *bp)
5848 {
5849 if (type == raw_bkpt_type_sw)
5850 return remove_memory_breakpoint (bp);
5851 else
5852 return low_remove_point (type, addr, size, bp);
5853 }
5854
5855 int
5856 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5857 int size, raw_breakpoint *bp)
5858 {
5859 /* Unsupported (see target.h). */
5860 return 1;
5861 }
5862
5863 /* Implement the stopped_by_sw_breakpoint target_ops
5864 method. */
5865
5866 bool
5867 linux_process_target::stopped_by_sw_breakpoint ()
5868 {
5869 struct lwp_info *lwp = get_thread_lwp (current_thread);
5870
5871 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5872 }
5873
5874 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5875 method. */
5876
5877 bool
5878 linux_process_target::supports_stopped_by_sw_breakpoint ()
5879 {
5880 return USE_SIGTRAP_SIGINFO;
5881 }
5882
5883 /* Implement the stopped_by_hw_breakpoint target_ops
5884 method. */
5885
5886 bool
5887 linux_process_target::stopped_by_hw_breakpoint ()
5888 {
5889 struct lwp_info *lwp = get_thread_lwp (current_thread);
5890
5891 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5892 }
5893
5894 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5895 method. */
5896
5897 bool
5898 linux_process_target::supports_stopped_by_hw_breakpoint ()
5899 {
5900 return USE_SIGTRAP_SIGINFO;
5901 }
5902
5903 /* Implement the supports_hardware_single_step target_ops method. */
5904
5905 bool
5906 linux_process_target::supports_hardware_single_step ()
5907 {
5908 return can_hardware_single_step ();
5909 }
5910
5911 bool
5912 linux_process_target::stopped_by_watchpoint ()
5913 {
5914 struct lwp_info *lwp = get_thread_lwp (current_thread);
5915
5916 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5917 }
5918
5919 CORE_ADDR
5920 linux_process_target::stopped_data_address ()
5921 {
5922 struct lwp_info *lwp = get_thread_lwp (current_thread);
5923
5924 return lwp->stopped_data_address;
5925 }
5926
5927 /* This is only used for targets that define PT_TEXT_ADDR,
5928 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5929 target presumably has other ways of acquiring this information, such
5930 as loadmaps. */
5931
5932 bool
5933 linux_process_target::supports_read_offsets ()
5934 {
5935 #ifdef SUPPORTS_READ_OFFSETS
5936 return true;
5937 #else
5938 return false;
5939 #endif
5940 }
5941
5942 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5943 to tell gdb about. */
5944
5945 int
5946 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5947 {
5948 #ifdef SUPPORTS_READ_OFFSETS
5949 unsigned long text, text_end, data;
5950 int pid = lwpid_of (current_thread);
5951
5952 errno = 0;
5953
5954 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5955 (PTRACE_TYPE_ARG4) 0);
5956 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5957 (PTRACE_TYPE_ARG4) 0);
5958 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5959 (PTRACE_TYPE_ARG4) 0);
5960
5961 if (errno == 0)
5962 {
5963 /* Both text and data offsets produced at compile-time (and so
5964 used by gdb) are relative to the beginning of the program,
5965 with the data segment immediately following the text segment.
5966 However, the actual runtime layout in memory may put the data
5967 somewhere else, so when we send gdb a data base address, we
5968 use the real data base address and subtract the compile-time
5969 data base address from it (which is just the length of the
5970 text segment). BSS immediately follows data in both
5971 cases. */
5972 *text_p = text;
5973 *data_p = data - (text_end - text);
5974
5975 return 1;
5976 }
5977 return 0;
5978 #else
5979 gdb_assert_not_reached ("target op read_offsets not supported");
5980 #endif
5981 }
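
/* Worked example of the adjustment above (numbers are illustrative):
 if the compile-time layout puts text at 0 with a length of 0x1000
 and data immediately after at 0x1000, but the kernel maps text at
 0x100000 and data at 0x200000, then

 *text_p = 0x100000
 *data_p = 0x200000 - (0x101000 - 0x100000) = 0x1ff000

 so that adding gdb's compile-time data offset (0x1000, the text
 length) to *data_p again yields the true runtime data address,
 0x200000. */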
5982
5983 bool
5984 linux_process_target::supports_get_tls_address ()
5985 {
5986 #ifdef USE_THREAD_DB
5987 return true;
5988 #else
5989 return false;
5990 #endif
5991 }
5992
5993 int
5994 linux_process_target::get_tls_address (thread_info *thread,
5995 CORE_ADDR offset,
5996 CORE_ADDR load_module,
5997 CORE_ADDR *address)
5998 {
5999 #ifdef USE_THREAD_DB
6000 return thread_db_get_tls_address (thread, offset, load_module, address);
6001 #else
6002 return -1;
6003 #endif
6004 }
6005
6006 bool
6007 linux_process_target::supports_qxfer_osdata ()
6008 {
6009 return true;
6010 }
6011
6012 int
6013 linux_process_target::qxfer_osdata (const char *annex,
6014 unsigned char *readbuf,
6015 unsigned const char *writebuf,
6016 CORE_ADDR offset, int len)
6017 {
6018 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6019 }
6020
6021 void
6022 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
6023 gdb_byte *inf_siginfo, int direction)
6024 {
6025 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
6026
6027 /* If there was no callback, or the callback didn't do anything,
6028 then just do a straight memcpy. */
6029 if (!done)
6030 {
6031 if (direction == 1)
6032 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6033 else
6034 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6035 }
6036 }
6037
6038 bool
6039 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
6040 int direction)
6041 {
6042 return false;
6043 }
6044
6045 bool
6046 linux_process_target::supports_qxfer_siginfo ()
6047 {
6048 return true;
6049 }
6050
6051 int
6052 linux_process_target::qxfer_siginfo (const char *annex,
6053 unsigned char *readbuf,
6054 unsigned const char *writebuf,
6055 CORE_ADDR offset, int len)
6056 {
6057 int pid;
6058 siginfo_t siginfo;
6059 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6060
6061 if (current_thread == NULL)
6062 return -1;
6063
6064 pid = lwpid_of (current_thread);
6065
6066 if (debug_threads)
6067 debug_printf ("%s siginfo for lwp %d.\n",
6068 readbuf != NULL ? "Reading" : "Writing",
6069 pid);
6070
6071 if (offset >= sizeof (siginfo))
6072 return -1;
6073
6074 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6075 return -1;
6076
6077 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6078 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6079 inferior with a 64-bit GDBSERVER should look the same as debugging it
6080 with a 32-bit GDBSERVER, we need to convert it. */
6081 siginfo_fixup (&siginfo, inf_siginfo, 0);
6082
6083 if (offset + len > sizeof (siginfo))
6084 len = sizeof (siginfo) - offset;
6085
6086 if (readbuf != NULL)
6087 memcpy (readbuf, inf_siginfo + offset, len);
6088 else
6089 {
6090 memcpy (inf_siginfo + offset, writebuf, len);
6091
6092 /* Convert back to ptrace layout before flushing it out. */
6093 siginfo_fixup (&siginfo, inf_siginfo, 1);
6094
6095 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6096 return -1;
6097 }
6098
6099 return len;
6100 }
6101
6102 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6103 it lets us notice when children change state; in all modes, it is
6104 the handler for the sigsuspend in my_waitpid. */
6105
6106 static void
6107 sigchld_handler (int signo)
6108 {
6109 int old_errno = errno;
6110
6111 if (debug_threads)
6112 {
6113 do
6114 {
6115 /* Use the async signal safe debug function. */
6116 if (debug_write ("sigchld_handler\n",
6117 sizeof ("sigchld_handler\n") - 1) < 0)
6118 break; /* just ignore */
6119 } while (0);
6120 }
6121
6122 if (target_is_async_p ())
6123 async_file_mark (); /* trigger a linux_wait */
6124
6125 errno = old_errno;
6126 }
6127
6128 bool
6129 linux_process_target::supports_non_stop ()
6130 {
6131 return true;
6132 }
6133
6134 bool
6135 linux_process_target::async (bool enable)
6136 {
6137 bool previous = target_is_async_p ();
6138
6139 if (debug_threads)
6140 debug_printf ("linux_async (%d), previous=%d\n",
6141 enable, previous);
6142
6143 if (previous != enable)
6144 {
6145 sigset_t mask;
6146 sigemptyset (&mask);
6147 sigaddset (&mask, SIGCHLD);
6148
6149 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6150
6151 if (enable)
6152 {
6153 if (pipe (linux_event_pipe) == -1)
6154 {
6155 linux_event_pipe[0] = -1;
6156 linux_event_pipe[1] = -1;
6157 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6158
6159 warning ("creating event pipe failed.");
6160 return previous;
6161 }
6162
6163 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6164 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6165
6166 /* Register the event loop handler. */
6167 add_file_handler (linux_event_pipe[0],
6168 handle_target_event, NULL);
6169
6170 /* Always trigger a linux_wait. */
6171 async_file_mark ();
6172 }
6173 else
6174 {
6175 delete_file_handler (linux_event_pipe[0]);
6176
6177 close (linux_event_pipe[0]);
6178 close (linux_event_pipe[1]);
6179 linux_event_pipe[0] = -1;
6180 linux_event_pipe[1] = -1;
6181 }
6182
6183 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6184 }
6185
6186 return previous;
6187 }
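
/* The event pipe above is the classic self-pipe trick: a signal
 handler cannot safely wake a select/poll-based event loop directly,
 so it writes into a non-blocking pipe whose read end is registered
 with the loop. A minimal standalone sketch of the pattern (the
 names here are illustrative, not gdbserver API):

 static int self_pipe[2]; // O_NONBLOCK on both ends

 static void
 handler (int sig)
 {
 int saved = errno;
 char c = 0;
 (void) write (self_pipe[1], &c, 1); // async-signal-safe
 errno = saved;
 }

 Readability of self_pipe[0] then means "a SIGCHLD arrived; go call
 waitpid", which mirrors what async_file_mark and handle_target_event
 arrange above. */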
6188
6189 int
6190 linux_process_target::start_non_stop (bool nonstop)
6191 {
6192 /* Register or unregister from event-loop accordingly. */
6193 target_async (nonstop);
6194
6195 if (target_is_async_p () != (nonstop != false))
6196 return -1;
6197
6198 return 0;
6199 }
6200
6201 bool
6202 linux_process_target::supports_multi_process ()
6203 {
6204 return true;
6205 }
6206
6207 /* Check if fork events are supported. */
6208
6209 bool
6210 linux_process_target::supports_fork_events ()
6211 {
6212 return linux_supports_tracefork ();
6213 }
6214
6215 /* Check if vfork events are supported. */
6216
6217 bool
6218 linux_process_target::supports_vfork_events ()
6219 {
6220 return linux_supports_tracefork ();
6221 }
6222
6223 /* Check if exec events are supported. */
6224
6225 bool
6226 linux_process_target::supports_exec_events ()
6227 {
6228 return linux_supports_traceexec ();
6229 }
6230
6231 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6232 ptrace flags for all inferiors. This is in case the new GDB connection
6233 doesn't support the same set of events that the previous one did. */
6234
6235 void
6236 linux_process_target::handle_new_gdb_connection ()
6237 {
6238 /* Request that all the lwps reset their ptrace options. */
6239 for_each_thread ([] (thread_info *thread)
6240 {
6241 struct lwp_info *lwp = get_thread_lwp (thread);
6242
6243 if (!lwp->stopped)
6244 {
6245 /* Stop the lwp so we can modify its ptrace options. */
6246 lwp->must_set_ptrace_flags = 1;
6247 linux_stop_lwp (lwp);
6248 }
6249 else
6250 {
6251 /* Already stopped; go ahead and set the ptrace options. */
6252 struct process_info *proc = find_process_pid (pid_of (thread));
6253 int options = linux_low_ptrace_options (proc->attached);
6254
6255 linux_enable_event_reporting (lwpid_of (thread), options);
6256 lwp->must_set_ptrace_flags = 0;
6257 }
6258 });
6259 }
6260
6261 int
6262 linux_process_target::handle_monitor_command (char *mon)
6263 {
6264 #ifdef USE_THREAD_DB
6265 return thread_db_handle_monitor_command (mon);
6266 #else
6267 return 0;
6268 #endif
6269 }
6270
6271 int
6272 linux_process_target::core_of_thread (ptid_t ptid)
6273 {
6274 return linux_common_core_of_thread (ptid);
6275 }
6276
6277 bool
6278 linux_process_target::supports_disable_randomization ()
6279 {
6280 #ifdef HAVE_PERSONALITY
6281 return true;
6282 #else
6283 return false;
6284 #endif
6285 }
6286
6287 bool
6288 linux_process_target::supports_agent ()
6289 {
6290 return true;
6291 }
6292
6293 bool
6294 linux_process_target::supports_range_stepping ()
6295 {
6296 if (supports_software_single_step ())
6297 return true;
6298 if (*the_low_target.supports_range_stepping == NULL)
6299 return false;
6300
6301 return (*the_low_target.supports_range_stepping) ();
6302 }
6303
6304 bool
6305 linux_process_target::supports_pid_to_exec_file ()
6306 {
6307 return true;
6308 }
6309
6310 char *
6311 linux_process_target::pid_to_exec_file (int pid)
6312 {
6313 return linux_proc_pid_to_exec_file (pid);
6314 }
6315
6316 bool
6317 linux_process_target::supports_multifs ()
6318 {
6319 return true;
6320 }
6321
6322 int
6323 linux_process_target::multifs_open (int pid, const char *filename,
6324 int flags, mode_t mode)
6325 {
6326 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6327 }
6328
6329 int
6330 linux_process_target::multifs_unlink (int pid, const char *filename)
6331 {
6332 return linux_mntns_unlink (pid, filename);
6333 }
6334
6335 ssize_t
6336 linux_process_target::multifs_readlink (int pid, const char *filename,
6337 char *buf, size_t bufsiz)
6338 {
6339 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6340 }
6341
6342 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6343 struct target_loadseg
6344 {
6345 /* Core address to which the segment is mapped. */
6346 Elf32_Addr addr;
6347 /* VMA recorded in the program header. */
6348 Elf32_Addr p_vaddr;
6349 /* Size of this segment in memory. */
6350 Elf32_Word p_memsz;
6351 };
6352
6353 # if defined PT_GETDSBT
6354 struct target_loadmap
6355 {
6356 /* Protocol version number, must be zero. */
6357 Elf32_Word version;
6358 /* Pointer to the DSBT table, its size, and the DSBT index. */
6359 unsigned *dsbt_table;
6360 unsigned dsbt_size, dsbt_index;
6361 /* Number of segments in this map. */
6362 Elf32_Word nsegs;
6363 /* The actual memory map. */
6364 struct target_loadseg segs[/*nsegs*/];
6365 };
6366 # define LINUX_LOADMAP PT_GETDSBT
6367 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6368 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6369 # else
6370 struct target_loadmap
6371 {
6372 /* Protocol version number, must be zero. */
6373 Elf32_Half version;
6374 /* Number of segments in this map. */
6375 Elf32_Half nsegs;
6376 /* The actual memory map. */
6377 struct target_loadseg segs[/*nsegs*/];
6378 };
6379 # define LINUX_LOADMAP PTRACE_GETFDPIC
6380 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6381 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6382 # endif
6383
6384 bool
6385 linux_process_target::supports_read_loadmap ()
6386 {
6387 return true;
6388 }
6389
6390 int
6391 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6392 unsigned char *myaddr, unsigned int len)
6393 {
6394 int pid = lwpid_of (current_thread);
6395 int addr = -1;
6396 struct target_loadmap *data = NULL;
6397 unsigned int actual_length, copy_length;
6398
6399 if (strcmp (annex, "exec") == 0)
6400 addr = (int) LINUX_LOADMAP_EXEC;
6401 else if (strcmp (annex, "interp") == 0)
6402 addr = (int) LINUX_LOADMAP_INTERP;
6403 else
6404 return -1;
6405
6406 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6407 return -1;
6408
6409 if (data == NULL)
6410 return -1;
6411
6412 actual_length = sizeof (struct target_loadmap)
6413 + sizeof (struct target_loadseg) * data->nsegs;
6414
6415 if (offset < 0 || offset > actual_length)
6416 return -1;
6417
6418 copy_length = actual_length - offset < len ? actual_length - offset : len;
6419 memcpy (myaddr, (char *) data + offset, copy_length);
6420 return copy_length;
6421 }
6422 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6423
6424 void
6425 linux_process_target::process_qsupported (char **features, int count)
6426 {
6427 if (the_low_target.process_qsupported != NULL)
6428 the_low_target.process_qsupported (features, count);
6429 }
6430
6431 bool
6432 linux_process_target::supports_catch_syscall ()
6433 {
6434 return (the_low_target.get_syscall_trapinfo != NULL
6435 && linux_supports_tracesysgood ());
6436 }
6437
6438 int
6439 linux_process_target::get_ipa_tdesc_idx ()
6440 {
6441 if (the_low_target.get_ipa_tdesc_idx == NULL)
6442 return 0;
6443
6444 return (*the_low_target.get_ipa_tdesc_idx) ();
6445 }
6446
6447 bool
6448 linux_process_target::supports_tracepoints ()
6449 {
6450 if (*the_low_target.supports_tracepoints == NULL)
6451 return false;
6452
6453 return (*the_low_target.supports_tracepoints) ();
6454 }
6455
6456 CORE_ADDR
6457 linux_process_target::read_pc (regcache *regcache)
6458 {
6459 if (!low_supports_breakpoints ())
6460 return 0;
6461
6462 return low_get_pc (regcache);
6463 }
6464
6465 void
6466 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6467 {
6468 gdb_assert (low_supports_breakpoints ());
6469
6470 low_set_pc (regcache, pc);
6471 }
6472
6473 bool
6474 linux_process_target::supports_thread_stopped ()
6475 {
6476 return true;
6477 }
6478
6479 bool
6480 linux_process_target::thread_stopped (thread_info *thread)
6481 {
6482 return get_thread_lwp (thread)->stopped;
6483 }
6484
6485 /* This exposes stop-all-threads functionality to other modules. */
6486
6487 void
6488 linux_process_target::pause_all (bool freeze)
6489 {
6490 stop_all_lwps (freeze, NULL);
6491 }
6492
6493 /* This exposes unstop-all-threads functionality to other gdbserver
6494 modules. */
6495
6496 void
6497 linux_process_target::unpause_all (bool unfreeze)
6498 {
6499 unstop_all_lwps (unfreeze, NULL);
6500 }
6501
6502 int
6503 linux_process_target::prepare_to_access_memory ()
6504 {
6505 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6506 running LWP. */
6507 if (non_stop)
6508 target_pause_all (true);
6509 return 0;
6510 }
6511
6512 void
6513 linux_process_target::done_accessing_memory ()
6514 {
6515 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6516 running LWP. */
6517 if (non_stop)
6518 target_unpause_all (true);
6519 }
6520
6521 bool
6522 linux_process_target::supports_fast_tracepoints ()
6523 {
6524 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6525 }
6526
6527 int
6528 linux_process_target::install_fast_tracepoint_jump_pad
6529 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6530 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6531 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6532 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6533 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6534 char *err)
6535 {
6536 return (*the_low_target.install_fast_tracepoint_jump_pad)
6537 (tpoint, tpaddr, collector, lockaddr, orig_size,
6538 jump_entry, trampoline, trampoline_size,
6539 jjump_pad_insn, jjump_pad_insn_size,
6540 adjusted_insn_addr, adjusted_insn_addr_end,
6541 err);
6542 }
6543
6544 emit_ops *
6545 linux_process_target::emit_ops ()
6546 {
6547 if (the_low_target.emit_ops != NULL)
6548 return (*the_low_target.emit_ops) ();
6549 else
6550 return NULL;
6551 }
6552
6553 int
6554 linux_process_target::get_min_fast_tracepoint_insn_len ()
6555 {
6556 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6557 }
6558
6559 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6560
6561 static int
6562 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6563 CORE_ADDR *phdr_memaddr, int *num_phdr)
6564 {
6565 char filename[PATH_MAX];
6566 int fd;
6567 const int auxv_size = is_elf64
6568 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6569 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6570
6571 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6572
6573 fd = open (filename, O_RDONLY);
6574 if (fd < 0)
6575 return 1;
6576
6577 *phdr_memaddr = 0;
6578 *num_phdr = 0;
6579 while (read (fd, buf, auxv_size) == auxv_size
6580 && (*phdr_memaddr == 0 || *num_phdr == 0))
6581 {
6582 if (is_elf64)
6583 {
6584 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6585
6586 switch (aux->a_type)
6587 {
6588 case AT_PHDR:
6589 *phdr_memaddr = aux->a_un.a_val;
6590 break;
6591 case AT_PHNUM:
6592 *num_phdr = aux->a_un.a_val;
6593 break;
6594 }
6595 }
6596 else
6597 {
6598 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6599
6600 switch (aux->a_type)
6601 {
6602 case AT_PHDR:
6603 *phdr_memaddr = aux->a_un.a_val;
6604 break;
6605 case AT_PHNUM:
6606 *num_phdr = aux->a_un.a_val;
6607 break;
6608 }
6609 }
6610 }
6611
6612 close (fd);
6613
6614 if (*phdr_memaddr == 0 || *num_phdr == 0)
6615 {
6616 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6617 "phdr_memaddr = %ld, phdr_num = %d",
6618 (long) *phdr_memaddr, *num_phdr);
6619 return 2;
6620 }
6621
6622 return 0;
6623 }
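
/* For reference, each auxv record consumed above is a fixed-size
 (type, value) pair; on a 64-bit target it has the glibc layout

 typedef struct
 {
 uint64_t a_type; // AT_PHDR, AT_PHNUM, ...
 union { uint64_t a_val; } a_un;
 } Elf64_auxv_t;

 which is why the loop can read the file in AUXV_SIZE chunks until
 both AT_PHDR and AT_PHNUM have been seen. */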
6624
6625 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6626
6627 static CORE_ADDR
6628 get_dynamic (const int pid, const int is_elf64)
6629 {
6630 CORE_ADDR phdr_memaddr, relocation;
6631 int num_phdr, i;
6632 unsigned char *phdr_buf;
6633 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6634
6635 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6636 return 0;
6637
6638 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6639 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6640
6641 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6642 return 0;
6643
6644 /* Compute relocation: it is expected to be 0 for "regular" executables,
6645 non-zero for PIE ones. */
6646 relocation = -1;
6647 for (i = 0; relocation == -1 && i < num_phdr; i++)
6648 if (is_elf64)
6649 {
6650 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6651
6652 if (p->p_type == PT_PHDR)
6653 relocation = phdr_memaddr - p->p_vaddr;
6654 }
6655 else
6656 {
6657 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6658
6659 if (p->p_type == PT_PHDR)
6660 relocation = phdr_memaddr - p->p_vaddr;
6661 }
6662
6663 if (relocation == -1)
6664 {
6665 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6666 all real-world executables, including PIE executables, always have
6667 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6668 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6669 provides DT_DEBUG anyway (fpc binaries are statically linked).
6670
6671 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6672
6673 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6674
6675 return 0;
6676 }
6677
6678 for (i = 0; i < num_phdr; i++)
6679 {
6680 if (is_elf64)
6681 {
6682 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6683
6684 if (p->p_type == PT_DYNAMIC)
6685 return p->p_vaddr + relocation;
6686 }
6687 else
6688 {
6689 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6690
6691 if (p->p_type == PT_DYNAMIC)
6692 return p->p_vaddr + relocation;
6693 }
6694 }
6695
6696 return 0;
6697 }
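
/* Worked example of the relocation computed above (addresses are
 made up): for a PIE whose program headers record p_vaddr == 0x40
 for PT_PHDR, but which the kernel mapped so that AT_PHDR reports
 0x555555554040, the load bias is

 relocation = 0x555555554040 - 0x40 = 0x555555554000

 and PT_DYNAMIC's p_vaddr is adjusted by the same amount to obtain
 the runtime address of _DYNAMIC. For a non-PIE executable the two
 values coincide and RELOCATION is 0. */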
6698
6699 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6700 can be 0 if the inferior does not yet have the library list initialized.
6701 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6702 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6703
6704 static CORE_ADDR
6705 get_r_debug (const int pid, const int is_elf64)
6706 {
6707 CORE_ADDR dynamic_memaddr;
6708 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6709 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6710 CORE_ADDR map = -1;
6711
6712 dynamic_memaddr = get_dynamic (pid, is_elf64);
6713 if (dynamic_memaddr == 0)
6714 return map;
6715
6716 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6717 {
6718 if (is_elf64)
6719 {
6720 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6721 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6722 union
6723 {
6724 Elf64_Xword map;
6725 unsigned char buf[sizeof (Elf64_Xword)];
6726 }
6727 rld_map;
6728 #endif
6729 #ifdef DT_MIPS_RLD_MAP
6730 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6731 {
6732 if (linux_read_memory (dyn->d_un.d_val,
6733 rld_map.buf, sizeof (rld_map.buf)) == 0)
6734 return rld_map.map;
6735 else
6736 break;
6737 }
6738 #endif /* DT_MIPS_RLD_MAP */
6739 #ifdef DT_MIPS_RLD_MAP_REL
6740 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6741 {
6742 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6743 rld_map.buf, sizeof (rld_map.buf)) == 0)
6744 return rld_map.map;
6745 else
6746 break;
6747 }
6748 #endif /* DT_MIPS_RLD_MAP_REL */
6749
6750 if (dyn->d_tag == DT_DEBUG && map == -1)
6751 map = dyn->d_un.d_val;
6752
6753 if (dyn->d_tag == DT_NULL)
6754 break;
6755 }
6756 else
6757 {
6758 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6759 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6760 union
6761 {
6762 Elf32_Word map;
6763 unsigned char buf[sizeof (Elf32_Word)];
6764 }
6765 rld_map;
6766 #endif
6767 #ifdef DT_MIPS_RLD_MAP
6768 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6769 {
6770 if (linux_read_memory (dyn->d_un.d_val,
6771 rld_map.buf, sizeof (rld_map.buf)) == 0)
6772 return rld_map.map;
6773 else
6774 break;
6775 }
6776 #endif /* DT_MIPS_RLD_MAP */
6777 #ifdef DT_MIPS_RLD_MAP_REL
6778 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6779 {
6780 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6781 rld_map.buf, sizeof (rld_map.buf)) == 0)
6782 return rld_map.map;
6783 else
6784 break;
6785 }
6786 #endif /* DT_MIPS_RLD_MAP_REL */
6787
6788 if (dyn->d_tag == DT_DEBUG && map == -1)
6789 map = dyn->d_un.d_val;
6790
6791 if (dyn->d_tag == DT_NULL)
6792 break;
6793 }
6794
6795 dynamic_memaddr += dyn_size;
6796 }
6797
6798 return map;
6799 }
6800
6801 /* Read one pointer from MEMADDR in the inferior. */
6802
6803 static int
6804 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6805 {
6806 int ret;
6807
6808 /* Go through a union so this works on either big or little endian
6809 hosts, when the inferior's pointer size is smaller than the size
6810 of CORE_ADDR. It is assumed that the inferior's endianness is the
6811 same as the superior's. */
6812 union
6813 {
6814 CORE_ADDR core_addr;
6815 unsigned int ui;
6816 unsigned char uc;
6817 } addr;
6818
6819 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6820 if (ret == 0)
6821 {
6822 if (ptr_size == sizeof (CORE_ADDR))
6823 *ptr = addr.core_addr;
6824 else if (ptr_size == sizeof (unsigned int))
6825 *ptr = addr.ui;
6826 else
6827 gdb_assert_not_reached ("unhandled pointer size");
6828 }
6829 return ret;
6830 }
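
/* Worked example for the union above (values are illustrative):
 reading a 4-byte inferior pointer 0x08048000 with an 8-byte
 CORE_ADDR fills only the first four bytes of the union, so on a
 little-endian host addr.uc holds 00 80 04 08 and addr.ui reads back
 0x08048000, while addr.core_addr would include four stale high
 bytes. Selecting the member whose size matches PTR_SIZE is what
 keeps this correct on either endianness. */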
6831
6832 bool
6833 linux_process_target::supports_qxfer_libraries_svr4 ()
6834 {
6835 return true;
6836 }
6837
6838 struct link_map_offsets
6839 {
6840 /* Offset of r_debug.r_version. */
6841 int r_version_offset;
6842
6843 /* Offset of r_debug.r_map. */
6844 int r_map_offset;
6845
6846 /* Offset to l_addr field in struct link_map. */
6847 int l_addr_offset;
6848
6849 /* Offset to l_name field in struct link_map. */
6850 int l_name_offset;
6851
6852 /* Offset to l_ld field in struct link_map. */
6853 int l_ld_offset;
6854
6855 /* Offset to l_next field in struct link_map. */
6856 int l_next_offset;
6857
6858 /* Offset to l_prev field in struct link_map. */
6859 int l_prev_offset;
6860 };
6861
6862 /* Construct qXfer:libraries-svr4:read reply. */
6863
6864 int
6865 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6866 unsigned char *readbuf,
6867 unsigned const char *writebuf,
6868 CORE_ADDR offset, int len)
6869 {
6870 struct process_info_private *const priv = current_process ()->priv;
6871 char filename[PATH_MAX];
6872 int pid, is_elf64;
6873
6874 static const struct link_map_offsets lmo_32bit_offsets =
6875 {
6876 0, /* r_version offset. */
6877 4, /* r_debug.r_map offset. */
6878 0, /* l_addr offset in link_map. */
6879 4, /* l_name offset in link_map. */
6880 8, /* l_ld offset in link_map. */
6881 12, /* l_next offset in link_map. */
6882 16 /* l_prev offset in link_map. */
6883 };
6884
6885 static const struct link_map_offsets lmo_64bit_offsets =
6886 {
6887 0, /* r_version offset. */
6888 8, /* r_debug.r_map offset. */
6889 0, /* l_addr offset in link_map. */
6890 8, /* l_name offset in link_map. */
6891 16, /* l_ld offset in link_map. */
6892 24, /* l_next offset in link_map. */
6893 32 /* l_prev offset in link_map. */
6894 };
6895 const struct link_map_offsets *lmo;
6896 unsigned int machine;
6897 int ptr_size;
6898 CORE_ADDR lm_addr = 0, lm_prev = 0;
6899 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6900 int header_done = 0;
6901
6902 if (writebuf != NULL)
6903 return -2;
6904 if (readbuf == NULL)
6905 return -1;
6906
6907 pid = lwpid_of (current_thread);
6908 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6909 is_elf64 = elf_64_file_p (filename, &machine);
6910 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6911 ptr_size = is_elf64 ? 8 : 4;
6912
6913 while (annex[0] != '\0')
6914 {
6915 const char *sep;
6916 CORE_ADDR *addrp;
6917 int name_len;
6918
6919 sep = strchr (annex, '=');
6920 if (sep == NULL)
6921 break;
6922
6923 name_len = sep - annex;
6924 if (name_len == 5 && startswith (annex, "start"))
6925 addrp = &lm_addr;
6926 else if (name_len == 4 && startswith (annex, "prev"))
6927 addrp = &lm_prev;
6928 else
6929 {
6930 annex = strchr (sep, ';');
6931 if (annex == NULL)
6932 break;
6933 annex++;
6934 continue;
6935 }
6936
6937 annex = decode_address_to_semicolon (addrp, sep + 1);
6938 }
6939
6940 if (lm_addr == 0)
6941 {
6942 int r_version = 0;
6943
6944 if (priv->r_debug == 0)
6945 priv->r_debug = get_r_debug (pid, is_elf64);
6946
6947 /* We failed to find DT_DEBUG. This situation will not change
6948 for this inferior - do not retry it. Report it to GDB as E01;
6949 see solib-svr4.c on the GDB side for the reasons. */
6950 if (priv->r_debug == (CORE_ADDR) -1)
6951 return -1;
6952
6953 if (priv->r_debug != 0)
6954 {
6955 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6956 (unsigned char *) &r_version,
6957 sizeof (r_version)) != 0
6958 || r_version != 1)
6959 {
6960 warning ("unexpected r_debug version %d", r_version);
6961 }
6962 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6963 &lm_addr, ptr_size) != 0)
6964 {
6965 warning ("unable to read r_map from 0x%lx",
6966 (long) priv->r_debug + lmo->r_map_offset);
6967 }
6968 }
6969 }
6970
6971 std::string document = "<library-list-svr4 version=\"1.0\"";
6972
6973 while (lm_addr
6974 && read_one_ptr (lm_addr + lmo->l_name_offset,
6975 &l_name, ptr_size) == 0
6976 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6977 &l_addr, ptr_size) == 0
6978 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6979 &l_ld, ptr_size) == 0
6980 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6981 &l_prev, ptr_size) == 0
6982 && read_one_ptr (lm_addr + lmo->l_next_offset,
6983 &l_next, ptr_size) == 0)
6984 {
6985 unsigned char libname[PATH_MAX];
6986
6987 if (lm_prev != l_prev)
6988 {
6989 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6990 (long) lm_prev, (long) l_prev);
6991 break;
6992 }
6993
6994 /* Ignore the first entry even if it has a valid name, as the first
6995 entry corresponds to the main executable. The first entry should not
6996 be skipped if the dynamic loader was loaded late by a static executable
6997 (see the solib-svr4.c parameter ignore_first). But in that case the
6998 main executable does not have PT_DYNAMIC present, and this function
6999 has already returned above due to a failed get_r_debug. */
7000 if (lm_prev == 0)
7001 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7002 else
7003 {
7004 /* Not checking for error because reading may stop before
7005 we've got PATH_MAX worth of characters. */
7006 libname[0] = '\0';
7007 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7008 libname[sizeof (libname) - 1] = '\0';
7009 if (libname[0] != '\0')
7010 {
7011 if (!header_done)
7012 {
7013 /* Terminate `<library-list-svr4'. */
7014 document += '>';
7015 header_done = 1;
7016 }
7017
7018 string_appendf (document, "<library name=\"");
7019 xml_escape_text_append (&document, (char *) libname);
7020 string_appendf (document, "\" lm=\"0x%lx\" "
7021 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7022 (unsigned long) lm_addr, (unsigned long) l_addr,
7023 (unsigned long) l_ld);
7024 }
7025 }
7026
7027 lm_prev = lm_addr;
7028 lm_addr = l_next;
7029 }
7030
7031 if (!header_done)
7032 {
7033 /* Empty list; terminate `<library-list-svr4'. */
7034 document += "/>";
7035 }
7036 else
7037 document += "</library-list-svr4>";
7038
7039 int document_len = document.length ();
7040 if (offset < document_len)
7041 document_len -= offset;
7042 else
7043 document_len = 0;
7044 if (len > document_len)
7045 len = document_len;
7046
7047 memcpy (readbuf, document.data () + offset, len);
7048
7049 return len;
7050 }
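
/* Illustrative sketch of the exchange implemented above (all
 addresses are made-up examples): GDB sends an annex such as

 start=0x7ffff7ffe190;prev=0x0;

 and receives a document along the lines of

 <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
 <library name="/lib64/libc.so.6" lm="0x7ffff7fbd000"
 l_addr="0x7ffff7dd5000" l_ld="0x7ffff7f9fb80"/>
 </library-list-svr4>

 where each <library> element corresponds to one struct link_map in
 the inferior's r_debug chain. */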
7051
7052 #ifdef HAVE_LINUX_BTRACE
7053
7054 btrace_target_info *
7055 linux_process_target::enable_btrace (ptid_t ptid,
7056 const btrace_config *conf)
7057 {
7058 return linux_enable_btrace (ptid, conf);
7059 }
7060
7061 /* See to_disable_btrace target method. */
7062
7063 int
7064 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7065 {
7066 enum btrace_error err;
7067
7068 err = linux_disable_btrace (tinfo);
7069 return (err == BTRACE_ERR_NONE ? 0 : -1);
7070 }
7071
7072 /* Encode an Intel Processor Trace configuration. */
7073
7074 static void
7075 linux_low_encode_pt_config (struct buffer *buffer,
7076 const struct btrace_data_pt_config *config)
7077 {
7078 buffer_grow_str (buffer, "<pt-config>\n");
7079
7080 switch (config->cpu.vendor)
7081 {
7082 case CV_INTEL:
7083 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7084 "model=\"%u\" stepping=\"%u\"/>\n",
7085 config->cpu.family, config->cpu.model,
7086 config->cpu.stepping);
7087 break;
7088
7089 default:
7090 break;
7091 }
7092
7093 buffer_grow_str (buffer, "</pt-config>\n");
7094 }
7095
7096 /* Encode a raw buffer. */
7097
7098 static void
7099 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7100 unsigned int size)
7101 {
7102 if (size == 0)
7103 return;
7104
7105 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7106 buffer_grow_str (buffer, "<raw>\n");
7107
7108 while (size-- > 0)
7109 {
7110 char elem[2];
7111
7112 elem[0] = tohex ((*data >> 4) & 0xf);
7113 elem[1] = tohex (*data++ & 0xf);
7114
7115 buffer_grow (buffer, elem, 2);
7116 }
7117
7118 buffer_grow_str (buffer, "</raw>\n");
7119 }
7120
7121 /* See to_read_btrace target method. */
7122
7123 int
7124 linux_process_target::read_btrace (btrace_target_info *tinfo,
7125 buffer *buffer,
7126 enum btrace_read_type type)
7127 {
7128 struct btrace_data btrace;
7129 enum btrace_error err;
7130
7131 err = linux_read_btrace (&btrace, tinfo, type);
7132 if (err != BTRACE_ERR_NONE)
7133 {
7134 if (err == BTRACE_ERR_OVERFLOW)
7135 buffer_grow_str0 (buffer, "E.Overflow.");
7136 else
7137 buffer_grow_str0 (buffer, "E.Generic Error.");
7138
7139 return -1;
7140 }
7141
7142 switch (btrace.format)
7143 {
7144 case BTRACE_FORMAT_NONE:
7145 buffer_grow_str0 (buffer, "E.No Trace.");
7146 return -1;
7147
7148 case BTRACE_FORMAT_BTS:
7149 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7150 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7151
7152 for (const btrace_block &block : *btrace.variant.bts.blocks)
7153 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7154 paddress (block.begin), paddress (block.end));
7155
7156 buffer_grow_str0 (buffer, "</btrace>\n");
7157 break;
7158
7159 case BTRACE_FORMAT_PT:
7160 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7161 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7162 buffer_grow_str (buffer, "<pt>\n");
7163
7164 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7165
7166 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7167 btrace.variant.pt.size);
7168
7169 buffer_grow_str (buffer, "</pt>\n");
7170 buffer_grow_str0 (buffer, "</btrace>\n");
7171 break;
7172
7173 default:
7174 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7175 return -1;
7176 }
7177
7178 return 0;
7179 }
7180
7181 /* See to_btrace_conf target method. */
7182
7183 int
7184 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7185 buffer *buffer)
7186 {
7187 const struct btrace_config *conf;
7188
7189 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7190 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7191
7192 conf = linux_btrace_conf (tinfo);
7193 if (conf != NULL)
7194 {
7195 switch (conf->format)
7196 {
7197 case BTRACE_FORMAT_NONE:
7198 break;
7199
7200 case BTRACE_FORMAT_BTS:
7201 buffer_xml_printf (buffer, "<bts");
7202 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7203 buffer_xml_printf (buffer, " />\n");
7204 break;
7205
7206 case BTRACE_FORMAT_PT:
7207 buffer_xml_printf (buffer, "<pt");
7208 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7209 buffer_xml_printf (buffer, "/>\n");
7210 break;
7211 }
7212 }
7213
7214 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7215 return 0;
7216 }
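
/* Example of the document produced above, for a BTS configuration
 with a 64 KiB ring buffer (the size value is illustrative):

 <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
 <btrace-conf version="1.0">
 <bts size="0x10000" />
 </btrace-conf>
*/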
7217 #endif /* HAVE_LINUX_BTRACE */
7218
7219 /* See nat/linux-nat.h. */
7220
7221 ptid_t
7222 current_lwp_ptid (void)
7223 {
7224 return ptid_of (current_thread);
7225 }
7226
7227 const char *
7228 linux_process_target::thread_name (ptid_t thread)
7229 {
7230 return linux_proc_tid_get_name (thread);
7231 }
7232
7233 #if USE_THREAD_DB
7234 bool
7235 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7236 int *handle_len)
7237 {
7238 return thread_db_thread_handle (ptid, handle, handle_len);
7239 }
7240 #endif
7241
7242 /* Default implementation of linux_target_ops method "set_pc" for
7243 a 32-bit pc register which is literally named "pc". */
7244
7245 void
7246 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7247 {
7248 uint32_t newpc = pc;
7249
7250 supply_register_by_name (regcache, "pc", &newpc);
7251 }
7252
7253 /* Default implementation of linux_target_ops method "get_pc" for
7254 a 32-bit pc register which is literally named "pc". */
7255
7256 CORE_ADDR
7257 linux_get_pc_32bit (struct regcache *regcache)
7258 {
7259 uint32_t pc;
7260
7261 collect_register_by_name (regcache, "pc", &pc);
7262 if (debug_threads)
7263 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7264 return pc;
7265 }
7266
7267 /* Default implementation of linux_target_ops method "set_pc" for
7268 a 64-bit pc register which is literally named "pc". */
7269
7270 void
7271 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7272 {
7273 uint64_t newpc = pc;
7274
7275 supply_register_by_name (regcache, "pc", &newpc);
7276 }
7277
7278 /* Default implementation of linux_target_ops method "get_pc" for
7279 a 64-bit pc register which is literally named "pc". */
7280
7281 CORE_ADDR
7282 linux_get_pc_64bit (struct regcache *regcache)
7283 {
7284 uint64_t pc;
7285
7286 collect_register_by_name (regcache, "pc", &pc);
7287 if (debug_threads)
7288 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7289 return pc;
7290 }
7291
7292 /* See linux-low.h. */
7293
7294 int
7295 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7296 {
7297 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7298 int offset = 0;
7299
7300 gdb_assert (wordsize == 4 || wordsize == 8);
7301
7302 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7303 {
7304 if (wordsize == 4)
7305 {
7306 uint32_t *data_p = (uint32_t *) data;
7307 if (data_p[0] == match)
7308 {
7309 *valp = data_p[1];
7310 return 1;
7311 }
7312 }
7313 else
7314 {
7315 uint64_t *data_p = (uint64_t *) data;
7316 if (data_p[0] == match)
7317 {
7318 *valp = data_p[1];
7319 return 1;
7320 }
7321 }
7322
7323 offset += 2 * wordsize;
7324 }
7325
7326 return 0;
7327 }
7328
7329 /* See linux-low.h. */
7330
7331 CORE_ADDR
7332 linux_get_hwcap (int wordsize)
7333 {
7334 CORE_ADDR hwcap = 0;
7335 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7336 return hwcap;
7337 }
7338
7339 /* See linux-low.h. */
7340
7341 CORE_ADDR
7342 linux_get_hwcap2 (int wordsize)
7343 {
7344 CORE_ADDR hwcap2 = 0;
7345 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7346 return hwcap2;
7347 }
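
/* Illustrative usage sketch: the helpers above work for any auxv
 tag, not just HWCAP. For example, an arch backend on a 64-bit
 target could probe the page size (AT_PAGESZ comes from <elf.h>; in
 real code the wordsize would come from the target description):

 CORE_ADDR pagesz = 0;
 if (linux_get_auxv (8, AT_PAGESZ, &pagesz))
 debug_printf ("inferior page size: %ld\n", (long) pagesz);
*/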
7348
7349 #ifdef HAVE_LINUX_REGSETS
7350 void
7351 initialize_regsets_info (struct regsets_info *info)
7352 {
7353 for (info->num_regsets = 0;
7354 info->regsets[info->num_regsets].size >= 0;
7355 info->num_regsets++)
7356 ;
7357 }
7358 #endif
7359
7360 void
7361 initialize_low (void)
7362 {
7363 struct sigaction sigchld_action;
7364
7365 memset (&sigchld_action, 0, sizeof (sigchld_action));
7366 set_target_ops (the_linux_target);
7367
7368 linux_ptrace_init_warnings ();
7369 linux_proc_init_warnings ();
7370
7371 sigchld_action.sa_handler = sigchld_handler;
7372 sigemptyset (&sigchld_action.sa_mask);
7373 sigchld_action.sa_flags = SA_RESTART;
7374 sigaction (SIGCHLD, &sigchld_action, NULL);
7375
7376 initialize_low_arch ();
7377
7378 linux_check_ptrace_features ();
7379 }