/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static struct lwp_info *add_lwp (ptid_t ptid);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
                                    siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

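/* Default implementations of the low target's breakpoint- and
   PC-related methods; architectures that support breakpoints
   override these.  */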
bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
                          "implemented");
}

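/* Amount by which to decrement the PC after a software breakpoint
   trap.  By default the PC is left pointing at the breakpoint
   address, so no adjustment is needed.  */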
int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF header at all.  Set
   *MACHINE to the header's e_machine field, or to EM_NONE if HEADER
   is not an ELF header.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

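/* Delete LWP: remove its thread from the thread list, let the low
   target release any arch-specific data, and free the lwp_info
   itself.  */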
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

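/* Run the low target's architecture setup routine with THREAD
   temporarily made the current thread.  */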
void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

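/* Handle an extended wait status for the event LWP (fork/vfork/clone,
   vfork-done and exec events).  Return 0 if the event should be
   reported to GDB, or 1 if it was handled internally and should be
   suppressed.  */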
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_t (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, the child
                 will hit them, so uninsert single-step breakpoints
                 from the parent (and child).  Once the vfork child is
                 done, reinsert them back into the parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing a step-over with single-step
             breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once the vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

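/* Return LWP's current program counter, read from its register cache,
   or 0 if the low target does not support breakpoints.  */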
CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          low_set_pc (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}

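/* Allocate an lwp_info for PTID, add a corresponding thread to the
   thread list, and give the low target a chance to attach per-thread
   data.  */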
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, safe_strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

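/* Return true if the process with id PID has at most one thread left,
   i.e. no thread other than the first one we see.  */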
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

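/* Kill process PROCESS and reap all of its LWPs, killing the thread
   group leader last.  */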
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 safe_strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

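/* Detach from process PROCESS: stop and stabilize all of its threads,
   detach the non-leader LWPs first and the leader last, then mourn
   the process.  */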
int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

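/* Wait until process PID exits or is killed, or until waitpid fails
   with ECHILD because the process has already been reaped.  */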
void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

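/* Return true if THREAD's pending status is still of interest.  A
   pending breakpoint stop is discarded if the PC has since moved or
   the breakpoint that caused it is gone.  */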
bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !low_breakpoint_at (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

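/* Callback used to find threads with an interesting pending event.
   THREAD must match PTID, be resumed from the client's perspective,
   and have a pending status that is still valid.  */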
bool
linux_process_target::status_pending_p_callback (thread_info *thread,
                                                 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

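/* Find the lwp_info whose LWP id matches PTID's lwp field, or PTID's
   pid field if the lwp field is zero.  Return NULL if none is
   found.  */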
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                    "num_lwps=%d, zombie=%d\n",
                    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
        /* Check if there are other threads in the group, as we may
           have raced with the inferior simply exiting.  */
        && !last_thread_of_process_p (leader_pid)
        && linux_proc_pid_is_zombie (leader_pid))
      {
        /* A leader zombie can mean one of two things:

           - It exited, and there's an exit status pending
             available, or only the leader exited (not the whole
             program).  In the latter case, we can't waitpid the
             leader's exit status until all other threads are gone.

           - There are 3 or more threads in the group, and a thread
             other than the leader exec'd.  On an exec, the Linux
             kernel destroys all other threads (except the execing
             one) in the thread group, and resets the execing thread's
             tid to the tgid.  No exit notification is sent for the
             execing thread -- from the ptracer's perspective, it
             appears as though the execing thread just vanishes.
             Until we reap all other threads except the leader and the
             execing thread, the leader will be zombie, and the
             execing thread will be in `D (disc sleep)'.  As soon as
             all other threads are reaped, the execing thread changes
             its tid to the tgid, and the previous (zombie) leader
1878 vanishes, giving place to the "new" leader. We could try
1879 distinguishing the exit and exec cases, by waiting once
1880 more, and seeing if something comes out, but it doesn't
1881 sound useful. The previous leader _does_ go away, and
1882 we'll re-add the new one once we see the exec event
1883 (which is just the same as what would happen if the
1884 previous leader did exit voluntarily before some other
1885 thread execs). */
1886
1887 if (debug_threads)
1888 debug_printf ("CZL: Thread group leader %d zombie "
1889 "(it exited, or another thread execd).\n",
1890 leader_pid);
1891
1892 delete_lwp (leader_lp);
1893 }
1894 });
1895 }
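
/* Editorial sketch: the zombie test above ultimately reads the State:
   line of /proc/PID/status (the real implementation is
   linux_proc_pid_is_zombie in nat/linux-procfs.c).  A minimal
   standalone version, error handling elided and assuming <stdio.h>
   is visible here, looks roughly like this:  */

static int
example_pid_is_zombie (pid_t pid)
{
  char path[64], line[128], state = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      break;
  fclose (f);
  return state == 'Z';
}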
1896
1897 /* Callback for `find_thread'. Returns true if THREAD's LWP matches
1898 FILTER and is not stopped. */
1899
1900 static bool
1901 not_stopped_callback (thread_info *thread, ptid_t filter)
1902 {
1903 if (!thread->id.matches (filter))
1904 return false;
1905
1906 lwp_info *lwp = get_thread_lwp (thread);
1907
1908 return !lwp->stopped;
1909 }
1910
1911 /* Increment LWP's suspend count. */
1912
1913 static void
1914 lwp_suspended_inc (struct lwp_info *lwp)
1915 {
1916 lwp->suspended++;
1917
1918 if (debug_threads && lwp->suspended > 4)
1919 {
1920 struct thread_info *thread = get_lwp_thread (lwp);
1921
1922 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1923 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1924 }
1925 }
1926
1927 /* Decrement LWP's suspend count. */
1928
1929 static void
1930 lwp_suspended_decr (struct lwp_info *lwp)
1931 {
1932 lwp->suspended--;
1933
1934 if (lwp->suspended < 0)
1935 {
1936 struct thread_info *thread = get_lwp_thread (lwp);
1937
1938 internal_error (__FILE__, __LINE__,
1939 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1940 lwp->suspended);
1941 }
1942 }
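
/* Editorial sketch: callers always use lwp_suspended_inc and
   lwp_suspended_decr as a bracketed pair (see handle_tracepoints
   below).  A hypothetical RAII guard, not used by gdbserver itself,
   would make that pairing explicit:  */

struct example_scoped_lwp_suspend
{
  explicit example_scoped_lwp_suspend (lwp_info *lwp) : m_lwp (lwp)
  { lwp_suspended_inc (m_lwp); }

  ~example_scoped_lwp_suspend ()
  { lwp_suspended_decr (m_lwp); }

  lwp_info *m_lwp;
};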
1943
1944 /* This function should only be called if the LWP got a SIGTRAP.
1945
1946 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1947 event was handled, 0 otherwise. */
1948
1949 static int
1950 handle_tracepoints (struct lwp_info *lwp)
1951 {
1952 struct thread_info *tinfo = get_lwp_thread (lwp);
1953 int tpoint_related_event = 0;
1954
1955 gdb_assert (lwp->suspended == 0);
1956
1957 /* If this tracepoint hit causes a tracing stop, we'll immediately
1958 uninsert tracepoints. To do this, we temporarily pause all
1959 threads, unpatch away, and then unpause threads. We need to make
1960 sure the unpausing doesn't resume LWP too. */
1961 lwp_suspended_inc (lwp);
1962
1963 /* And we need to be sure that any all-threads-stopping doesn't try
1964 to move threads out of the jump pads, as it could deadlock the
1965 inferior (LWP could be in the jump pad, maybe even holding the
1966 lock.) */
1967
1968 /* Do any necessary step collect actions. */
1969 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1970
1971 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1972
1973 /* See if we just hit a tracepoint and do its main collect
1974 actions. */
1975 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1976
1977 lwp_suspended_decr (lwp);
1978
1979 gdb_assert (lwp->suspended == 0);
1980 gdb_assert (!stabilizing_threads
1981 || (lwp->collecting_fast_tracepoint
1982 != fast_tpoint_collect_result::not_collecting));
1983
1984 if (tpoint_related_event)
1985 {
1986 if (debug_threads)
1987 debug_printf ("got a tracepoint event\n");
1988 return 1;
1989 }
1990
1991 return 0;
1992 }
1993
1994 /* Convenience wrapper. Returns information about LWP's fast tracepoint
1995 collection status. */
1996
1997 static fast_tpoint_collect_result
1998 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1999 struct fast_tpoint_collect_status *status)
2000 {
2001 CORE_ADDR thread_area;
2002 struct thread_info *thread = get_lwp_thread (lwp);
2003
2004 if (the_low_target.get_thread_area == NULL)
2005 return fast_tpoint_collect_result::not_collecting;
2006
2007 /* Get the thread area address. This is used to recognize which
2008 thread is which when tracing with the in-process agent library.
2009 We don't read anything from the address, and treat it as opaque;
2010 it's the address itself that we assume is unique per-thread. */
2011 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2012 return fast_tpoint_collect_result::not_collecting;
2013
2014 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2015 }
2016
2017 bool
2018 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2019 {
2020 struct thread_info *saved_thread;
2021
2022 saved_thread = current_thread;
2023 current_thread = get_lwp_thread (lwp);
2024
2025 if ((wstat == NULL
2026 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2027 && supports_fast_tracepoints ()
2028 && agent_loaded_p ())
2029 {
2030 struct fast_tpoint_collect_status status;
2031
2032 if (debug_threads)
2033 debug_printf ("Checking whether LWP %ld needs to move out of the "
2034 "jump pad.\n",
2035 lwpid_of (current_thread));
2036
2037 fast_tpoint_collect_result r
2038 = linux_fast_tracepoint_collecting (lwp, &status);
2039
2040 if (wstat == NULL
2041 || (WSTOPSIG (*wstat) != SIGILL
2042 && WSTOPSIG (*wstat) != SIGFPE
2043 && WSTOPSIG (*wstat) != SIGSEGV
2044 && WSTOPSIG (*wstat) != SIGBUS))
2045 {
2046 lwp->collecting_fast_tracepoint = r;
2047
2048 if (r != fast_tpoint_collect_result::not_collecting)
2049 {
2050 if (r == fast_tpoint_collect_result::before_insn
2051 && lwp->exit_jump_pad_bkpt == NULL)
2052 {
2053 /* Haven't executed the original instruction yet.
2054 Set breakpoint there, and wait till it's hit,
2055 then single-step until exiting the jump pad. */
2056 lwp->exit_jump_pad_bkpt
2057 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2058 }
2059
2060 if (debug_threads)
2061 debug_printf ("Checking whether LWP %ld needs to move out of "
2062 "the jump pad...it does\n",
2063 lwpid_of (current_thread));
2064 current_thread = saved_thread;
2065
2066 return true;
2067 }
2068 }
2069 else
2070 {
2071 /* If we get a synchronous signal while collecting, *and*
2072 while executing the (relocated) original instruction,
2073 reset the PC to point at the tpoint address, before
2074 reporting to GDB. Otherwise, it's an IPA lib bug: just
2075 report the signal to GDB, and pray for the best. */
2076
2077 lwp->collecting_fast_tracepoint
2078 = fast_tpoint_collect_result::not_collecting;
2079
2080 if (r != fast_tpoint_collect_result::not_collecting
2081 && (status.adjusted_insn_addr <= lwp->stop_pc
2082 && lwp->stop_pc < status.adjusted_insn_addr_end))
2083 {
2084 siginfo_t info;
2085 struct regcache *regcache;
2086
2087 /* The si_addr on a few signals references the address
2088 of the faulting instruction. Adjust that as
2089 well. */
2090 if ((WSTOPSIG (*wstat) == SIGILL
2091 || WSTOPSIG (*wstat) == SIGFPE
2092 || WSTOPSIG (*wstat) == SIGBUS
2093 || WSTOPSIG (*wstat) == SIGSEGV)
2094 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2095 (PTRACE_TYPE_ARG3) 0, &info) == 0
2096 /* Final check just to make sure we don't clobber
2097 the siginfo of non-kernel-sent signals. */
2098 && (uintptr_t) info.si_addr == lwp->stop_pc)
2099 {
2100 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2101 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2102 (PTRACE_TYPE_ARG3) 0, &info);
2103 }
2104
2105 regcache = get_thread_regcache (current_thread, 1);
2106 low_set_pc (regcache, status.tpoint_addr);
2107 lwp->stop_pc = status.tpoint_addr;
2108
2109 /* Cancel any fast tracepoint lock this thread was
2110 holding. */
2111 force_unlock_trace_buffer ();
2112 }
2113
2114 if (lwp->exit_jump_pad_bkpt != NULL)
2115 {
2116 if (debug_threads)
2117 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt; "
2118 "stopping all threads momentarily.\n");
2119
2120 stop_all_lwps (1, lwp);
2121
2122 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2123 lwp->exit_jump_pad_bkpt = NULL;
2124
2125 unstop_all_lwps (1, lwp);
2126
2127 gdb_assert (lwp->suspended >= 0);
2128 }
2129 }
2130 }
2131
2132 if (debug_threads)
2133 debug_printf ("Checking whether LWP %ld needs to move out of the "
2134 "jump pad...no\n",
2135 lwpid_of (current_thread));
2136
2137 current_thread = saved_thread;
2138 return false;
2139 }
2140
2141 /* Enqueue one signal in the "signals to report later when out of the
2142 jump pad" list. */
2143
2144 static void
2145 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2146 {
2147 struct pending_signals *p_sig;
2148 struct thread_info *thread = get_lwp_thread (lwp);
2149
2150 if (debug_threads)
2151 debug_printf ("Deferring signal %d for LWP %ld.\n",
2152 WSTOPSIG (*wstat), lwpid_of (thread));
2153
2154 if (debug_threads)
2155 {
2156 struct pending_signals *sig;
2157
2158 for (sig = lwp->pending_signals_to_report;
2159 sig != NULL;
2160 sig = sig->prev)
2161 debug_printf (" Already queued %d\n",
2162 sig->signal);
2163
2164 debug_printf (" (no more currently queued signals)\n");
2165 }
2166
2167 /* Don't enqueue non-RT signals if they are already in the deferred
2168 queue (SIGSTOP being the easiest signal to see ending up here
2169 twice). */
2170 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2171 {
2172 struct pending_signals *sig;
2173
2174 for (sig = lwp->pending_signals_to_report;
2175 sig != NULL;
2176 sig = sig->prev)
2177 {
2178 if (sig->signal == WSTOPSIG (*wstat))
2179 {
2180 if (debug_threads)
2181 debug_printf ("Not requeuing already queued non-RT signal %d"
2182 " for LWP %ld\n",
2183 sig->signal,
2184 lwpid_of (thread));
2185 return;
2186 }
2187 }
2188 }
2189
2190 p_sig = XCNEW (struct pending_signals);
2191 p_sig->prev = lwp->pending_signals_to_report;
2192 p_sig->signal = WSTOPSIG (*wstat);
2193
2194 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2195 &p_sig->info);
2196
2197 lwp->pending_signals_to_report = p_sig;
2198 }
2199
2200 /* Dequeue one signal from the "signals to report later when out of
2201 the jump pad" list. */
2202
2203 static int
2204 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2205 {
2206 struct thread_info *thread = get_lwp_thread (lwp);
2207
2208 if (lwp->pending_signals_to_report != NULL)
2209 {
2210 struct pending_signals **p_sig;
2211
2212 p_sig = &lwp->pending_signals_to_report;
2213 while ((*p_sig)->prev != NULL)
2214 p_sig = &(*p_sig)->prev;
2215
2216 *wstat = W_STOPCODE ((*p_sig)->signal);
2217 if ((*p_sig)->info.si_signo != 0)
2218 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2219 &(*p_sig)->info);
2220 free (*p_sig);
2221 *p_sig = NULL;
2222
2223 if (debug_threads)
2224 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2225 WSTOPSIG (*wstat), lwpid_of (thread));
2226
2227 if (debug_threads)
2228 {
2229 struct pending_signals *sig;
2230
2231 for (sig = lwp->pending_signals_to_report;
2232 sig != NULL;
2233 sig = sig->prev)
2234 debug_printf (" Still queued %d\n",
2235 sig->signal);
2236
2237 debug_printf (" (no more queued signals)\n");
2238 }
2239
2240 return 1;
2241 }
2242
2243 return 0;
2244 }
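
/* Editorial sketch of the round trip: a signal deferred while the LWP
   is in the jump pad is re-materialized later as a wait status.  The
   helper is hypothetical and assumes LWP's deferred queue was empty
   beforehand.  */

static void
example_defer_and_replay (lwp_info *lwp, int wstat)
{
  enqueue_one_deferred_signal (lwp, &wstat);

  int replayed;
  if (dequeue_one_deferred_signal (lwp, &replayed))
    /* The replayed status carries the same stop signal.  */
    gdb_assert (WSTOPSIG (replayed) == WSTOPSIG (wstat));
}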
2245
2246 bool
2247 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2248 {
2249 struct thread_info *saved_thread = current_thread;
2250 current_thread = get_lwp_thread (child);
2251
2252 if (low_stopped_by_watchpoint ())
2253 {
2254 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2255 child->stopped_data_address = low_stopped_data_address ();
2256 }
2257
2258 current_thread = saved_thread;
2259
2260 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2261 }
2262
2263 bool
2264 linux_process_target::low_stopped_by_watchpoint ()
2265 {
2266 return false;
2267 }
2268
2269 CORE_ADDR
2270 linux_process_target::low_stopped_data_address ()
2271 {
2272 return 0;
2273 }
2274
2275 /* Return the ptrace options that we want to try to enable. */
2276
2277 static int
2278 linux_low_ptrace_options (int attached)
2279 {
2280 client_state &cs = get_client_state ();
2281 int options = 0;
2282
2283 if (!attached)
2284 options |= PTRACE_O_EXITKILL;
2285
2286 if (cs.report_fork_events)
2287 options |= PTRACE_O_TRACEFORK;
2288
2289 if (cs.report_vfork_events)
2290 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2291
2292 if (cs.report_exec_events)
2293 options |= PTRACE_O_TRACEEXEC;
2294
2295 options |= PTRACE_O_TRACESYSGOOD;
2296
2297 return options;
2298 }
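
/* Editorial sketch: these bits end up in a PTRACE_SETOPTIONS call,
   performed for us by linux_enable_event_reporting (see
   nat/linux-ptrace.c), which also ORs in the options gdbserver always
   wants.  The raw equivalent is roughly:  */

static void
example_apply_ptrace_options (pid_t lwpid, int attached)
{
  int options = linux_low_ptrace_options (attached);

  ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) (uintptr_t) options);
}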
2299
2300 lwp_info *
2301 linux_process_target::filter_event (int lwpid, int wstat)
2302 {
2303 client_state &cs = get_client_state ();
2304 struct lwp_info *child;
2305 struct thread_info *thread;
2306 int have_stop_pc = 0;
2307
2308 child = find_lwp_pid (ptid_t (lwpid));
2309
2310 /* Check for stop events reported by a process we didn't already
2311 know about - anything not already in our LWP list.
2312
2313 If we're expecting to receive stopped processes after
2314 fork, vfork, and clone events, then we'll just add the
2315 new one to our list and go back to waiting for the event
2316 to be reported - the stopped process might be returned
2317 from waitpid before or after the event is.
2318
2319 But note the case of a non-leader thread exec'ing after the
2320 leader having exited, and gone from our lists (because
2321 check_zombie_leaders deleted it). The non-leader thread
2322 changes its tid to the tgid. */
2323
2324 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2325 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2326 {
2327 ptid_t child_ptid;
2328
2329 /* A multi-thread exec after we had seen the leader exiting. */
2330 if (debug_threads)
2331 {
2332 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2333 "after exec.\n", lwpid);
2334 }
2335
2336 child_ptid = ptid_t (lwpid, lwpid, 0);
2337 child = add_lwp (child_ptid);
2338 child->stopped = 1;
2339 current_thread = child->thread;
2340 }
2341
2342 /* If we didn't find a process, one of two things presumably happened:
2343 - A process we started and then detached from has exited. Ignore it.
2344 - A process we are controlling has forked and the new child's stop
2345 was reported to us by the kernel. Save its PID. */
2346 if (child == NULL && WIFSTOPPED (wstat))
2347 {
2348 add_to_pid_list (&stopped_pids, lwpid, wstat);
2349 return NULL;
2350 }
2351 else if (child == NULL)
2352 return NULL;
2353
2354 thread = get_lwp_thread (child);
2355
2356 child->stopped = 1;
2357
2358 child->last_status = wstat;
2359
2360 /* Check if the thread has exited. */
2361 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2362 {
2363 if (debug_threads)
2364 debug_printf ("LLFE: %d exited.\n", lwpid);
2365
2366 if (finish_step_over (child))
2367 {
2368 /* Unsuspend all other LWPs, and set them back running again. */
2369 unsuspend_all_lwps (child);
2370 }
2371
2372 /* If there is at least one more LWP, then the exit signal was
2373 not the end of the debugged application and should be
2374 ignored, unless GDB wants to hear about thread exits. */
2375 if (cs.report_thread_events
2376 || last_thread_of_process_p (pid_of (thread)))
2377 {
2378 /* Events are serialized to the GDB core, and we can't
2379 report this one right now. Leave the status pending for
2380 the next time we're able to report it. */
2381 mark_lwp_dead (child, wstat);
2382 return child;
2383 }
2384 else
2385 {
2386 delete_lwp (child);
2387 return NULL;
2388 }
2389 }
2390
2391 gdb_assert (WIFSTOPPED (wstat));
2392
2393 if (WIFSTOPPED (wstat))
2394 {
2395 struct process_info *proc;
2396
2397 /* Architecture-specific setup after inferior is running. */
2398 proc = find_process_pid (pid_of (thread));
2399 if (proc->tdesc == NULL)
2400 {
2401 if (proc->attached)
2402 {
2403 /* This needs to happen after we have attached to the
2404 inferior and it is stopped for the first time, but
2405 before we access any inferior registers. */
2406 arch_setup_thread (thread);
2407 }
2408 else
2409 {
2410 /* The process is started, but GDBserver will do
2411 architecture-specific setup after the program stops at
2412 the first instruction. */
2413 child->status_pending_p = 1;
2414 child->status_pending = wstat;
2415 return child;
2416 }
2417 }
2418 }
2419
2420 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2421 {
2422 struct process_info *proc = find_process_pid (pid_of (thread));
2423 int options = linux_low_ptrace_options (proc->attached);
2424
2425 linux_enable_event_reporting (lwpid, options);
2426 child->must_set_ptrace_flags = 0;
2427 }
2428
2429 /* Always update syscall_state, even if it will be filtered later. */
2430 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2431 {
2432 child->syscall_state
2433 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2434 ? TARGET_WAITKIND_SYSCALL_RETURN
2435 : TARGET_WAITKIND_SYSCALL_ENTRY);
2436 }
2437 else
2438 {
2439 /* Almost all other ptrace-stops are known to be outside of system
2440 calls, with further exceptions in handle_extended_wait. */
2441 child->syscall_state = TARGET_WAITKIND_IGNORE;
2442 }
2443
2444 /* Be careful to not overwrite stop_pc until save_stop_reason is
2445 called. */
2446 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2447 && linux_is_extended_waitstatus (wstat))
2448 {
2449 child->stop_pc = get_pc (child);
2450 if (handle_extended_wait (&child, wstat))
2451 {
2452 /* The event has been handled, so just return without
2453 reporting it. */
2454 return NULL;
2455 }
2456 }
2457
2458 if (linux_wstatus_maybe_breakpoint (wstat))
2459 {
2460 if (save_stop_reason (child))
2461 have_stop_pc = 1;
2462 }
2463
2464 if (!have_stop_pc)
2465 child->stop_pc = get_pc (child);
2466
2467 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2468 && child->stop_expected)
2469 {
2470 if (debug_threads)
2471 debug_printf ("Expected stop.\n");
2472 child->stop_expected = 0;
2473
2474 if (thread->last_resume_kind == resume_stop)
2475 {
2476 /* We want to report the stop to the core. Treat the
2477 SIGSTOP as a normal event. */
2478 if (debug_threads)
2479 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2480 target_pid_to_str (ptid_of (thread)));
2481 }
2482 else if (stopping_threads != NOT_STOPPING_THREADS)
2483 {
2484 /* Stopping threads. We don't want this SIGSTOP to end up
2485 pending. */
2486 if (debug_threads)
2487 debug_printf ("LLW: SIGSTOP caught for %s "
2488 "while stopping threads.\n",
2489 target_pid_to_str (ptid_of (thread)));
2490 return NULL;
2491 }
2492 else
2493 {
2494 /* This is a delayed SIGSTOP. Filter out the event. */
2495 if (debug_threads)
2496 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2497 child->stepping ? "step" : "continue",
2498 target_pid_to_str (ptid_of (thread)));
2499
2500 resume_one_lwp (child, child->stepping, 0, NULL);
2501 return NULL;
2502 }
2503 }
2504
2505 child->status_pending_p = 1;
2506 child->status_pending = wstat;
2507 return child;
2508 }
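
/* Editorial sketch: filter_event's classification of raw wait
   statuses, reduced to a standalone helper (name hypothetical).
   SYSCALL_SIGTRAP is SIGTRAP | 0x80, reported when
   PTRACE_O_TRACESYSGOOD is in effect.  */

static const char *
example_classify_wstatus (int wstat)
{
  if (WIFEXITED (wstat))
    return "exited";
  if (WIFSIGNALED (wstat))
    return "killed by signal";
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    return "syscall entry/return stop";
  if (WIFSTOPPED (wstat))
    return "signal stop";
  return "unknown";
}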
2509
2510 /* Return 1 if THREAD is doing a hardware single step, else 0. */
2511
2512 static int
2513 maybe_hw_step (struct thread_info *thread)
2514 {
2515 if (can_hardware_single_step ())
2516 return 1;
2517 else
2518 {
2519 /* GDBserver must insert single-step breakpoint for software
2520 single step. */
2521 gdb_assert (has_single_step_breakpoints (thread));
2522 return 0;
2523 }
2524 }
2525
2526 void
2527 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2528 {
2529 struct lwp_info *lp = get_thread_lwp (thread);
2530
2531 if (lp->stopped
2532 && !lp->suspended
2533 && !lp->status_pending_p
2534 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2535 {
2536 int step = 0;
2537
2538 if (thread->last_resume_kind == resume_step)
2539 step = maybe_hw_step (thread);
2540
2541 if (debug_threads)
2542 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2543 target_pid_to_str (ptid_of (thread)),
2544 paddress (lp->stop_pc),
2545 step);
2546
2547 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2548 }
2549 }
2550
2551 int
2552 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2553 ptid_t filter_ptid,
2554 int *wstatp, int options)
2555 {
2556 struct thread_info *event_thread;
2557 struct lwp_info *event_child, *requested_child;
2558 sigset_t block_mask, prev_mask;
2559
2560 retry:
2561 /* N.B. event_thread points to the thread_info struct that contains
2562 event_child. Keep them in sync. */
2563 event_thread = NULL;
2564 event_child = NULL;
2565 requested_child = NULL;
2566
2567 /* Check for a lwp with a pending status. */
2568
2569 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2570 {
2571 event_thread = find_thread_in_random ([&] (thread_info *thread)
2572 {
2573 return status_pending_p_callback (thread, filter_ptid);
2574 });
2575
2576 if (event_thread != NULL)
2577 event_child = get_thread_lwp (event_thread);
2578 if (debug_threads && event_thread)
2579 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2580 }
2581 else if (filter_ptid != null_ptid)
2582 {
2583 requested_child = find_lwp_pid (filter_ptid);
2584
2585 if (stopping_threads == NOT_STOPPING_THREADS
2586 && requested_child->status_pending_p
2587 && (requested_child->collecting_fast_tracepoint
2588 != fast_tpoint_collect_result::not_collecting))
2589 {
2590 enqueue_one_deferred_signal (requested_child,
2591 &requested_child->status_pending);
2592 requested_child->status_pending_p = 0;
2593 requested_child->status_pending = 0;
2594 resume_one_lwp (requested_child, 0, 0, NULL);
2595 }
2596
2597 if (requested_child->suspended
2598 && requested_child->status_pending_p)
2599 {
2600 internal_error (__FILE__, __LINE__,
2601 "requesting an event out of a"
2602 " suspended child?");
2603 }
2604
2605 if (requested_child->status_pending_p)
2606 {
2607 event_child = requested_child;
2608 event_thread = get_lwp_thread (event_child);
2609 }
2610 }
2611
2612 if (event_child != NULL)
2613 {
2614 if (debug_threads)
2615 debug_printf ("Got an event from pending child %ld (%04x)\n",
2616 lwpid_of (event_thread), event_child->status_pending);
2617 *wstatp = event_child->status_pending;
2618 event_child->status_pending_p = 0;
2619 event_child->status_pending = 0;
2620 current_thread = event_thread;
2621 return lwpid_of (event_thread);
2622 }
2623
2624 /* But if we don't find a pending event, we'll have to wait.
2625
2626 We only enter this loop if no process has a pending wait status.
2627 Thus any action taken in response to a wait status inside this
2628 loop is responding as soon as we detect the status, not after any
2629 pending events. */
2630
2631 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2632 all signals while here. */
2633 sigfillset (&block_mask);
2634 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2635
2636 /* Always pull all events out of the kernel. We'll randomly select
2637 an event LWP out of all that have events, to prevent
2638 starvation. */
2639 while (event_child == NULL)
2640 {
2641 pid_t ret = 0;
2642
2643 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2644 quirks:
2645
2646 - If the thread group leader exits while other threads in the
2647 thread group still exist, waitpid(TGID, ...) hangs. That
2648 waitpid won't return an exit status until the other threads
2649 in the group are reaped.
2650
2651 - When a non-leader thread execs, that thread just vanishes
2652 without reporting an exit (so we'd hang if we waited for it
2653 explicitly in that case). The exec event is reported to
2654 the TGID pid. */
2655 errno = 0;
2656 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2657
2658 if (debug_threads)
2659 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2660 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2661
2662 if (ret > 0)
2663 {
2664 if (debug_threads)
2665 {
2666 debug_printf ("LLW: waitpid %ld received %s\n",
2667 (long) ret, status_to_str (*wstatp));
2668 }
2669
2670 /* Filter all events. IOW, leave all events pending. We'll
2671 randomly select an event LWP out of all that have events
2672 below. */
2673 filter_event (ret, *wstatp);
2674 /* Retry until nothing comes out of waitpid. A single
2675 SIGCHLD can indicate more than one child stopped. */
2676 continue;
2677 }
2678
2679 /* Now that we've pulled all events out of the kernel, resume
2680 LWPs that don't have an interesting event to report. */
2681 if (stopping_threads == NOT_STOPPING_THREADS)
2682 for_each_thread ([this] (thread_info *thread)
2683 {
2684 resume_stopped_resumed_lwps (thread);
2685 });
2686
2687 /* ... and find an LWP with a status to report to the core, if
2688 any. */
2689 event_thread = find_thread_in_random ([&] (thread_info *thread)
2690 {
2691 return status_pending_p_callback (thread, filter_ptid);
2692 });
2693
2694 if (event_thread != NULL)
2695 {
2696 event_child = get_thread_lwp (event_thread);
2697 *wstatp = event_child->status_pending;
2698 event_child->status_pending_p = 0;
2699 event_child->status_pending = 0;
2700 break;
2701 }
2702
2703 /* Check for zombie thread group leaders. Those can't be reaped
2704 until all other threads in the thread group are. */
2705 check_zombie_leaders ();
2706
2707 auto not_stopped = [&] (thread_info *thread)
2708 {
2709 return not_stopped_callback (thread, wait_ptid);
2710 };
2711
2712 /* If there are no resumed children left in the set of LWPs we
2713 want to wait for, bail. We can't just block in
2714 waitpid/sigsuspend, because lwps might have been left stopped
2715 in trace-stop state, and we'd be stuck forever waiting for
2716 their status to change (which would only happen if we resumed
2717 them). Even if WNOHANG is set, this return code is preferred
2718 over 0 (below), as it is more detailed. */
2719 if (find_thread (not_stopped) == NULL)
2720 {
2721 if (debug_threads)
2722 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2723 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2724 return -1;
2725 }
2726
2727 /* No interesting event to report to the caller. */
2728 if ((options & WNOHANG))
2729 {
2730 if (debug_threads)
2731 debug_printf ("WNOHANG set, no event found\n");
2732
2733 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2734 return 0;
2735 }
2736
2737 /* Block until we get an event reported with SIGCHLD. */
2738 if (debug_threads)
2739 debug_printf ("sigsuspend'ing\n");
2740
2741 sigsuspend (&prev_mask);
2742 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2743 goto retry;
2744 }
2745
2746 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2747
2748 current_thread = event_thread;
2749
2750 return lwpid_of (event_thread);
2751 }
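
/* Editorial sketch: the heart of wait_for_event_filtered is a
   drain-then-select loop.  Stripped of gdbserver's bookkeeping, the
   drain half is essentially (helper name hypothetical):  */

static void
example_drain_all_events (void)
{
  int wstat;
  pid_t pid;

  /* A single SIGCHLD can stand for several stopped children, so keep
     calling waitpid until it has nothing more to report.  */
  while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
    {
      /* ... record (pid, wstat) as a pending event ... */
    }
}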
2752
2753 int
2754 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2755 {
2756 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2757 }
2758
2759 /* Select one LWP out of those that have events pending. */
2760
2761 static void
2762 select_event_lwp (struct lwp_info **orig_lp)
2763 {
2764 struct thread_info *event_thread = NULL;
2765
2766 /* In all-stop, give preference to the LWP that is being
2767 single-stepped. There will be at most one, and it's the LWP that
2768 the core is most interested in. If we didn't do this, then we'd
2769 have to handle pending step SIGTRAPs somehow in case the core
2770 later continues the previously-stepped thread, otherwise we'd
2771 report the pending SIGTRAP, and the core, not having stepped the
2772 thread, wouldn't understand what the trap was for, and therefore
2773 would report it to the user as a random signal. */
2774 if (!non_stop)
2775 {
2776 event_thread = find_thread ([] (thread_info *thread)
2777 {
2778 lwp_info *lp = get_thread_lwp (thread);
2779
2780 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2781 && thread->last_resume_kind == resume_step
2782 && lp->status_pending_p);
2783 });
2784
2785 if (event_thread != NULL)
2786 {
2787 if (debug_threads)
2788 debug_printf ("SEL: Select single-step %s\n",
2789 target_pid_to_str (ptid_of (event_thread)));
2790 }
2791 }
2792 if (event_thread == NULL)
2793 {
2794 /* No single-stepping LWP. Select one at random, out of those
2795 which have had events. */
2796
2797 event_thread = find_thread_in_random ([&] (thread_info *thread)
2798 {
2799 lwp_info *lp = get_thread_lwp (thread);
2800
2801 /* Only resumed LWPs that have an event pending. */
2802 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2803 && lp->status_pending_p);
2804 });
2805 }
2806
2807 if (event_thread != NULL)
2808 {
2809 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2810
2811 /* Switch the event LWP. */
2812 *orig_lp = event_lp;
2813 }
2814 }
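
/* Editorial sketch: find_thread_in_random (used above) avoids
   starvation by picking uniformly among all matching threads.  The
   single-pass way to do that is reservoir sampling; a hypothetical
   standalone rendering, assuming the C library's rand ():  */

static thread_info *
example_pick_random_match (gdb::function_view<bool (thread_info *)> pred)
{
  thread_info *pick = NULL;
  int count = 0;

  for_each_thread ([&] (thread_info *thread)
    {
      /* Keep the k-th match with probability 1/k.  */
      if (pred (thread) && rand () % ++count == 0)
	pick = thread;
    });

  return pick;
}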
2815
2816 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2817 non-NULL. */
2818
2819 static void
2820 unsuspend_all_lwps (struct lwp_info *except)
2821 {
2822 for_each_thread ([&] (thread_info *thread)
2823 {
2824 lwp_info *lwp = get_thread_lwp (thread);
2825
2826 if (lwp != except)
2827 lwp_suspended_decr (lwp);
2828 });
2829 }
2830
2831 static bool stuck_in_jump_pad_callback (thread_info *thread);
2832 static bool lwp_running (thread_info *thread);
2833
2834 /* Stabilize threads (move out of jump pads).
2835
2836 If a thread is midway collecting a fast tracepoint, we need to
2837 finish the collection and move it out of the jump pad before
2838 reporting the signal.
2839
2840 This avoids recursion while collecting (when a signal arrives
2841 midway, and the signal handler itself collects), which would trash
2842 the trace buffer. In case the user set a breakpoint in a signal
2843 handler, this avoids the backtrace showing the jump pad, etc..
2844 Most importantly, there are certain things we can't do safely if
2845 threads are stopped in a jump pad (or in its callee's). For
2846 example:
2847
2848 - starting a new trace run. A thread still collecting the
2849 previous run could trash the trace buffer when resumed. The trace
2850 buffer control structures would have been reset but the thread had
2851 no way to tell. The thread could even be midway through memcpy'ing
2852 to the buffer, which would mean that when resumed, it would clobber
2853 the trace buffer that had been set up for a new run.
2854
2855 - we can't rewrite/reuse the jump pads for new tracepoints
2856 safely. Say you do tstart while a thread is stopped midway while
2857 collecting. When the thread is later resumed, it finishes the
2858 collection, and returns to the jump pad, to execute the original
2859 instruction that was under the tracepoint jump at the time the
2860 older run had been started. If the jump pad had been rewritten
2861 since for something else in the new run, the thread would now
2862 execute the wrong / random instructions. */
2863
2864 void
2865 linux_process_target::stabilize_threads ()
2866 {
2867 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2868
2869 if (thread_stuck != NULL)
2870 {
2871 if (debug_threads)
2872 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2873 lwpid_of (thread_stuck));
2874 return;
2875 }
2876
2877 thread_info *saved_thread = current_thread;
2878
2879 stabilizing_threads = 1;
2880
2881 /* Kick 'em all. */
2882 for_each_thread ([this] (thread_info *thread)
2883 {
2884 move_out_of_jump_pad (thread);
2885 });
2886
2887 /* Loop until all are stopped out of the jump pads. */
2888 while (find_thread (lwp_running) != NULL)
2889 {
2890 struct target_waitstatus ourstatus;
2891 struct lwp_info *lwp;
2892 int wstat;
2893
2894 /* Note that we go through the full wait event loop. While
2895 moving threads out of the jump pad, we need to be able to step
2896 over internal breakpoints and such. */
2897 wait_1 (minus_one_ptid, &ourstatus, 0);
2898
2899 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2900 {
2901 lwp = get_thread_lwp (current_thread);
2902
2903 /* Lock it. */
2904 lwp_suspended_inc (lwp);
2905
2906 if (ourstatus.value.sig != GDB_SIGNAL_0
2907 || current_thread->last_resume_kind == resume_stop)
2908 {
2909 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2910 enqueue_one_deferred_signal (lwp, &wstat);
2911 }
2912 }
2913 }
2914
2915 unsuspend_all_lwps (NULL);
2916
2917 stabilizing_threads = 0;
2918
2919 current_thread = saved_thread;
2920
2921 if (debug_threads)
2922 {
2923 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2924
2925 if (thread_stuck != NULL)
2926 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2927 lwpid_of (thread_stuck));
2928 }
2929 }
2930
2931 /* Convenience function that is called when the kernel reports an
2932 event that is not passed out to GDB. */
2933
2934 static ptid_t
2935 ignore_event (struct target_waitstatus *ourstatus)
2936 {
2937 /* If we got an event, there may still be others, as a single
2938 SIGCHLD can indicate more than one child stopped. This forces
2939 another target_wait call. */
2940 async_file_mark ();
2941
2942 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2943 return null_ptid;
2944 }
2945
2946 /* Convenience function that is called when the kernel reports an exit
2947 event. This decides whether to report the event to GDB as a
2948 process exit event, a thread exit event, or to suppress the
2949 event. */
2950
2951 static ptid_t
2952 filter_exit_event (struct lwp_info *event_child,
2953 struct target_waitstatus *ourstatus)
2954 {
2955 client_state &cs = get_client_state ();
2956 struct thread_info *thread = get_lwp_thread (event_child);
2957 ptid_t ptid = ptid_of (thread);
2958
2959 if (!last_thread_of_process_p (pid_of (thread)))
2960 {
2961 if (cs.report_thread_events)
2962 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2963 else
2964 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2965
2966 delete_lwp (event_child);
2967 }
2968 return ptid;
2969 }
2970
2971 /* Returns 1 if GDB is interested in any of EVENT_CHILD's syscalls. */
2972
2973 static int
2974 gdb_catching_syscalls_p (struct lwp_info *event_child)
2975 {
2976 struct thread_info *thread = get_lwp_thread (event_child);
2977 struct process_info *proc = get_thread_process (thread);
2978
2979 return !proc->syscalls_to_catch.empty ();
2980 }
2981
2982 /* Returns 1 if GDB is interested in EVENT_CHILD's syscall.
2983 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
2984
2985 static int
2986 gdb_catch_this_syscall_p (struct lwp_info *event_child)
2987 {
2988 int sysno;
2989 struct thread_info *thread = get_lwp_thread (event_child);
2990 struct process_info *proc = get_thread_process (thread);
2991
2992 if (proc->syscalls_to_catch.empty ())
2993 return 0;
2994
2995 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2996 return 1;
2997
2998 get_syscall_trapinfo (event_child, &sysno);
2999
3000 for (int iter : proc->syscalls_to_catch)
3001 if (iter == sysno)
3002 return 1;
3003
3004 return 0;
3005 }
3006
3007 ptid_t
3008 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3009 int target_options)
3010 {
3011 client_state &cs = get_client_state ();
3012 int w;
3013 struct lwp_info *event_child;
3014 int options;
3015 int pid;
3016 int step_over_finished;
3017 int bp_explains_trap;
3018 int maybe_internal_trap;
3019 int report_to_gdb;
3020 int trace_event;
3021 int in_step_range;
3022 int any_resumed;
3023
3024 if (debug_threads)
3025 {
3026 debug_enter ();
3027 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3028 }
3029
3030 /* Translate generic target options into linux options. */
3031 options = __WALL;
3032 if (target_options & TARGET_WNOHANG)
3033 options |= WNOHANG;
3034
3035 bp_explains_trap = 0;
3036 trace_event = 0;
3037 in_step_range = 0;
3038 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3039
3040 auto status_pending_p_any = [&] (thread_info *thread)
3041 {
3042 return status_pending_p_callback (thread, minus_one_ptid);
3043 };
3044
3045 auto not_stopped = [&] (thread_info *thread)
3046 {
3047 return not_stopped_callback (thread, minus_one_ptid);
3048 };
3049
3050 /* Find a resumed LWP, if any. */
3051 if (find_thread (status_pending_p_any) != NULL)
3052 any_resumed = 1;
3053 else if (find_thread (not_stopped) != NULL)
3054 any_resumed = 1;
3055 else
3056 any_resumed = 0;
3057
3058 if (step_over_bkpt == null_ptid)
3059 pid = wait_for_event (ptid, &w, options);
3060 else
3061 {
3062 if (debug_threads)
3063 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3064 target_pid_to_str (step_over_bkpt));
3065 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3066 }
3067
3068 if (pid == 0 || (pid == -1 && !any_resumed))
3069 {
3070 gdb_assert (target_options & TARGET_WNOHANG);
3071
3072 if (debug_threads)
3073 {
3074 debug_printf ("wait_1 ret = null_ptid, "
3075 "TARGET_WAITKIND_IGNORE\n");
3076 debug_exit ();
3077 }
3078
3079 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3080 return null_ptid;
3081 }
3082 else if (pid == -1)
3083 {
3084 if (debug_threads)
3085 {
3086 debug_printf ("wait_1 ret = null_ptid, "
3087 "TARGET_WAITKIND_NO_RESUMED\n");
3088 debug_exit ();
3089 }
3090
3091 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3092 return null_ptid;
3093 }
3094
3095 event_child = get_thread_lwp (current_thread);
3096
3097 /* wait_for_event only returns an exit status for the last
3098 child of a process. Report it. */
3099 if (WIFEXITED (w) || WIFSIGNALED (w))
3100 {
3101 if (WIFEXITED (w))
3102 {
3103 ourstatus->kind = TARGET_WAITKIND_EXITED;
3104 ourstatus->value.integer = WEXITSTATUS (w);
3105
3106 if (debug_threads)
3107 {
3108 debug_printf ("wait_1 ret = %s, exited with "
3109 "retcode %d\n",
3110 target_pid_to_str (ptid_of (current_thread)),
3111 WEXITSTATUS (w));
3112 debug_exit ();
3113 }
3114 }
3115 else
3116 {
3117 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3118 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3119
3120 if (debug_threads)
3121 {
3122 debug_printf ("wait_1 ret = %s, terminated with "
3123 "signal %d\n",
3124 target_pid_to_str (ptid_of (current_thread)),
3125 WTERMSIG (w));
3126 debug_exit ();
3127 }
3128 }
3129
3130 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3131 return filter_exit_event (event_child, ourstatus);
3132
3133 return ptid_of (current_thread);
3134 }
3135
3136 /* If step-over executes a breakpoint instruction, in the case of a
3137 hardware single step it means a gdb/gdbserver breakpoint had been
3138 planted on top of a permanent breakpoint, in the case of a software
3139 single step it may just mean that gdbserver hit the reinsert breakpoint.
3140 The PC has been adjusted by save_stop_reason to point at
3141 the breakpoint address.
3142 So in the case of hardware single step, advance the PC manually
3143 past the breakpoint, and in the case of software single step, advance
3144 only if it's not the single_step_breakpoint we are hitting.
3145 This prevents the program from trapping on a permanent breakpoint
3146 forever. */
3147 if (step_over_bkpt != null_ptid
3148 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3149 && (event_child->stepping
3150 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3151 {
3152 int increment_pc = 0;
3153 int breakpoint_kind = 0;
3154 CORE_ADDR stop_pc = event_child->stop_pc;
3155
3156 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3157 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3158
3159 if (debug_threads)
3160 {
3161 debug_printf ("step-over for %s executed software breakpoint\n",
3162 target_pid_to_str (ptid_of (current_thread)));
3163 }
3164
3165 if (increment_pc != 0)
3166 {
3167 struct regcache *regcache
3168 = get_thread_regcache (current_thread, 1);
3169
3170 event_child->stop_pc += increment_pc;
3171 low_set_pc (regcache, event_child->stop_pc);
3172
3173 if (!low_breakpoint_at (event_child->stop_pc))
3174 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3175 }
3176 }
3177
3178 /* If this event was not handled before, and is not a SIGTRAP, we
3179 report it. SIGILL and SIGSEGV are also treated as traps in case
3180 a breakpoint is inserted at the current PC. If this target does
3181 not support internal breakpoints at all, we also report the
3182 SIGTRAP without further processing; it's of no concern to us. */
3183 maybe_internal_trap
3184 = (low_supports_breakpoints ()
3185 && (WSTOPSIG (w) == SIGTRAP
3186 || ((WSTOPSIG (w) == SIGILL
3187 || WSTOPSIG (w) == SIGSEGV)
3188 && low_breakpoint_at (event_child->stop_pc))));
3189
3190 if (maybe_internal_trap)
3191 {
3192 /* Handle anything that requires bookkeeping before deciding to
3193 report the event or continue waiting. */
3194
3195 /* First check if we can explain the SIGTRAP with an internal
3196 breakpoint, or if we should possibly report the event to GDB.
3197 Do this before anything that may remove or insert a
3198 breakpoint. */
3199 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3200
3201 /* We have a SIGTRAP, possibly a step-over dance has just
3202 finished. If so, tweak the state machine accordingly,
3203 reinsert breakpoints and delete any single-step
3204 breakpoints. */
3205 step_over_finished = finish_step_over (event_child);
3206
3207 /* Now invoke the callbacks of any internal breakpoints there. */
3208 check_breakpoints (event_child->stop_pc);
3209
3210 /* Handle tracepoint data collecting. This may overflow the
3211 trace buffer, and cause a tracing stop, removing
3212 breakpoints. */
3213 trace_event = handle_tracepoints (event_child);
3214
3215 if (bp_explains_trap)
3216 {
3217 if (debug_threads)
3218 debug_printf ("Hit a gdbserver breakpoint.\n");
3219 }
3220 }
3221 else
3222 {
3223 /* We have some other signal, possibly a step-over dance was in
3224 progress, and it should be cancelled too. */
3225 step_over_finished = finish_step_over (event_child);
3226 }
3227
3228 /* We have all the data we need. Either report the event to GDB, or
3229 resume threads and keep waiting for more. */
3230
3231 /* If we're collecting a fast tracepoint, finish the collection and
3232 move out of the jump pad before delivering a signal. See
3233 linux_stabilize_threads. */
3234
3235 if (WIFSTOPPED (w)
3236 && WSTOPSIG (w) != SIGTRAP
3237 && supports_fast_tracepoints ()
3238 && agent_loaded_p ())
3239 {
3240 if (debug_threads)
3241 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3242 "to defer or adjust it.\n",
3243 WSTOPSIG (w), lwpid_of (current_thread));
3244
3245 /* Allow debugging the jump pad itself. */
3246 if (current_thread->last_resume_kind != resume_step
3247 && maybe_move_out_of_jump_pad (event_child, &w))
3248 {
3249 enqueue_one_deferred_signal (event_child, &w);
3250
3251 if (debug_threads)
3252 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3253 WSTOPSIG (w), lwpid_of (current_thread));
3254
3255 resume_one_lwp (event_child, 0, 0, NULL);
3256
3257 if (debug_threads)
3258 debug_exit ();
3259 return ignore_event (ourstatus);
3260 }
3261 }
3262
3263 if (event_child->collecting_fast_tracepoint
3264 != fast_tpoint_collect_result::not_collecting)
3265 {
3266 if (debug_threads)
3267 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3268 "Check if we're already there.\n",
3269 lwpid_of (current_thread),
3270 (int) event_child->collecting_fast_tracepoint);
3271
3272 trace_event = 1;
3273
3274 event_child->collecting_fast_tracepoint
3275 = linux_fast_tracepoint_collecting (event_child, NULL);
3276
3277 if (event_child->collecting_fast_tracepoint
3278 != fast_tpoint_collect_result::before_insn)
3279 {
3280 /* No longer need this breakpoint. */
3281 if (event_child->exit_jump_pad_bkpt != NULL)
3282 {
3283 if (debug_threads)
3284 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3285 "stopping all threads momentarily.\n");
3286
3287 /* Other running threads could hit this breakpoint.
3288 We don't handle moribund locations like GDB does,
3289 instead we always pause all threads when removing
3290 breakpoints, so that any step-over or
3291 decr_pc_after_break adjustment is always taken
3292 care of while the breakpoint is still
3293 inserted. */
3294 stop_all_lwps (1, event_child);
3295
3296 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3297 event_child->exit_jump_pad_bkpt = NULL;
3298
3299 unstop_all_lwps (1, event_child);
3300
3301 gdb_assert (event_child->suspended >= 0);
3302 }
3303 }
3304
3305 if (event_child->collecting_fast_tracepoint
3306 == fast_tpoint_collect_result::not_collecting)
3307 {
3308 if (debug_threads)
3309 debug_printf ("fast tracepoint finished "
3310 "collecting successfully.\n");
3311
3312 /* We may have a deferred signal to report. */
3313 if (dequeue_one_deferred_signal (event_child, &w))
3314 {
3315 if (debug_threads)
3316 debug_printf ("dequeued one signal.\n");
3317 }
3318 else
3319 {
3320 if (debug_threads)
3321 debug_printf ("no deferred signals.\n");
3322
3323 if (stabilizing_threads)
3324 {
3325 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3326 ourstatus->value.sig = GDB_SIGNAL_0;
3327
3328 if (debug_threads)
3329 {
3330 debug_printf ("wait_1 ret = %s, stopped "
3331 "while stabilizing threads\n",
3332 target_pid_to_str (ptid_of (current_thread)));
3333 debug_exit ();
3334 }
3335
3336 return ptid_of (current_thread);
3337 }
3338 }
3339 }
3340 }
3341
3342 /* Check whether GDB would be interested in this event. */
3343
3344 /* Check if GDB is interested in this syscall. */
3345 if (WIFSTOPPED (w)
3346 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3347 && !gdb_catch_this_syscall_p (event_child))
3348 {
3349 if (debug_threads)
3350 {
3351 debug_printf ("Ignored syscall for LWP %ld.\n",
3352 lwpid_of (current_thread));
3353 }
3354
3355 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3356
3357 if (debug_threads)
3358 debug_exit ();
3359 return ignore_event (ourstatus);
3360 }
3361
3362 /* If GDB is not interested in this signal, don't stop other
3363 threads, and don't report it to GDB. Just resume the inferior
3364 right away. We do this for threading-related signals as well as
3365 any that GDB specifically requested we ignore. But never ignore
3366 SIGSTOP if we sent it ourselves, and do not ignore signals when
3367 stepping - they may require special handling to skip the signal
3368 handler. Also never ignore signals that could be caused by a
3369 breakpoint. */
3370 if (WIFSTOPPED (w)
3371 && current_thread->last_resume_kind != resume_step
3372 && (
3373 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3374 (current_process ()->priv->thread_db != NULL
3375 && (WSTOPSIG (w) == __SIGRTMIN
3376 || WSTOPSIG (w) == __SIGRTMIN + 1))
3377 ||
3378 #endif
3379 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3380 && !(WSTOPSIG (w) == SIGSTOP
3381 && current_thread->last_resume_kind == resume_stop)
3382 && !linux_wstatus_maybe_breakpoint (w))))
3383 {
3384 siginfo_t info, *info_p;
3385
3386 if (debug_threads)
3387 debug_printf ("Ignored signal %d for LWP %ld.\n",
3388 WSTOPSIG (w), lwpid_of (current_thread));
3389
3390 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3391 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3392 info_p = &info;
3393 else
3394 info_p = NULL;
3395
3396 if (step_over_finished)
3397 {
3398 /* We cancelled this thread's step-over above. We still
3399 need to unsuspend all other LWPs, and set them back
3400 running again while the signal handler runs. */
3401 unsuspend_all_lwps (event_child);
3402
3403 /* Enqueue the pending signal info so that proceed_all_lwps
3404 doesn't lose it. */
3405 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3406
3407 proceed_all_lwps ();
3408 }
3409 else
3410 {
3411 resume_one_lwp (event_child, event_child->stepping,
3412 WSTOPSIG (w), info_p);
3413 }
3414
3415 if (debug_threads)
3416 debug_exit ();
3417
3418 return ignore_event (ourstatus);
3419 }
3420
3421 /* Note that all addresses are always "out of the step range" when
3422 there's no range to begin with. */
3423 in_step_range = lwp_in_step_range (event_child);
3424
3425 /* If GDB wanted this thread to single step, and the thread is out
3426 of the step range, we always want to report the SIGTRAP, and let
3427 GDB handle it. Watchpoints should always be reported. So should
3428 signals we can't explain. A SIGTRAP we can't explain could be a
3429 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3430 do, we'll be able to handle GDB breakpoints on top of internal
3431 breakpoints, by handling the internal breakpoint and still
3432 reporting the event to GDB. If we don't, we're out of luck, GDB
3433 won't see the breakpoint hit. If we see a single-step event but
3434 the thread should be continuing, don't pass the trap to gdb.
3435 That indicates that we had previously finished a single-step but
3436 left the single-step pending -- see
3437 complete_ongoing_step_over. */
3438 report_to_gdb = (!maybe_internal_trap
3439 || (current_thread->last_resume_kind == resume_step
3440 && !in_step_range)
3441 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3442 || (!in_step_range
3443 && !bp_explains_trap
3444 && !trace_event
3445 && !step_over_finished
3446 && !(current_thread->last_resume_kind == resume_continue
3447 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3448 || (gdb_breakpoint_here (event_child->stop_pc)
3449 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3450 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3451 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3452
3453 run_breakpoint_commands (event_child->stop_pc);
3454
3455 /* We found no reason GDB would want us to stop. We either hit one
3456 of our own breakpoints, or finished an internal step GDB
3457 shouldn't know about. */
3458 if (!report_to_gdb)
3459 {
3460 if (debug_threads)
3461 {
3462 if (bp_explains_trap)
3463 debug_printf ("Hit a gdbserver breakpoint.\n");
3464 if (step_over_finished)
3465 debug_printf ("Step-over finished.\n");
3466 if (trace_event)
3467 debug_printf ("Tracepoint event.\n");
3468 if (lwp_in_step_range (event_child))
3469 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3470 paddress (event_child->stop_pc),
3471 paddress (event_child->step_range_start),
3472 paddress (event_child->step_range_end));
3473 }
3474
3475 /* We're not reporting this breakpoint to GDB, so apply the
3476 decr_pc_after_break adjustment to the inferior's regcache
3477 ourselves. */
3478
3479 if (low_supports_breakpoints ())
3480 {
3481 struct regcache *regcache
3482 = get_thread_regcache (current_thread, 1);
3483 low_set_pc (regcache, event_child->stop_pc);
3484 }
3485
3486 if (step_over_finished)
3487 {
3488 /* If we have finished stepping over a breakpoint, we've
3489 stopped and suspended all LWPs momentarily except the
3490 stepping one. This is where we resume them all again.
3491 We're going to keep waiting, so use proceed, which
3492 handles stepping over the next breakpoint. */
3493 unsuspend_all_lwps (event_child);
3494 }
3495 else
3496 {
3497 /* Remove the single-step breakpoints if any. Note that
3498 there isn't single-step breakpoint if we finished stepping
3499 over. */
3500 if (supports_software_single_step ()
3501 && has_single_step_breakpoints (current_thread))
3502 {
3503 stop_all_lwps (0, event_child);
3504 delete_single_step_breakpoints (current_thread);
3505 unstop_all_lwps (0, event_child);
3506 }
3507 }
3508
3509 if (debug_threads)
3510 debug_printf ("proceeding all threads.\n");
3511 proceed_all_lwps ();
3512
3513 if (debug_threads)
3514 debug_exit ();
3515
3516 return ignore_event (ourstatus);
3517 }
3518
3519 if (debug_threads)
3520 {
3521 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3522 {
3523 std::string str
3524 = target_waitstatus_to_string (&event_child->waitstatus);
3525
3526 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3527 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3528 }
3529 if (current_thread->last_resume_kind == resume_step)
3530 {
3531 if (event_child->step_range_start == event_child->step_range_end)
3532 debug_printf ("GDB wanted to single-step, reporting event.\n");
3533 else if (!lwp_in_step_range (event_child))
3534 debug_printf ("Out of step range, reporting event.\n");
3535 }
3536 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3537 debug_printf ("Stopped by watchpoint.\n");
3538 else if (gdb_breakpoint_here (event_child->stop_pc))
3539 debug_printf ("Stopped by GDB breakpoint.\n");
3540 if (debug_threads)
3541 debug_printf ("Hit a non-gdbserver trap event.\n");
3542 }
3543
3544 /* Alright, we're going to report a stop. */
3545
3546 /* Remove single-step breakpoints. */
3547 if (supports_software_single_step ())
3548 {
3549 /* Decide whether to remove single-step breakpoints. If we do, stop
3550 all LWPs first, so that other threads won't hit a breakpoint left
3551 in stale memory. */
3552 int remove_single_step_breakpoints_p = 0;
3553
3554 if (non_stop)
3555 {
3556 remove_single_step_breakpoints_p
3557 = has_single_step_breakpoints (current_thread);
3558 }
3559 else
3560 {
3561 /* In all-stop, a stop reply cancels all previous resume
3562 requests. Delete all single-step breakpoints. */
3563
3564 find_thread ([&] (thread_info *thread) {
3565 if (has_single_step_breakpoints (thread))
3566 {
3567 remove_single_step_breakpoints_p = 1;
3568 return true;
3569 }
3570
3571 return false;
3572 });
3573 }
3574
3575 if (remove_single_step_breakpoints_p)
3576 {
3577 /* If we remove single-step breakpoints from memory, stop all LWPs
3578 first, so that other threads won't hit a breakpoint left in stale
3579 memory. */
3580 stop_all_lwps (0, event_child);
3581
3582 if (non_stop)
3583 {
3584 gdb_assert (has_single_step_breakpoints (current_thread));
3585 delete_single_step_breakpoints (current_thread);
3586 }
3587 else
3588 {
3589 for_each_thread ([] (thread_info *thread) {
3590 if (has_single_step_breakpoints (thread))
3591 delete_single_step_breakpoints (thread);
3592 });
3593 }
3594
3595 unstop_all_lwps (0, event_child);
3596 }
3597 }
3598
3599 if (!stabilizing_threads)
3600 {
3601 /* In all-stop, stop all threads. */
3602 if (!non_stop)
3603 stop_all_lwps (0, NULL);
3604
3605 if (step_over_finished)
3606 {
3607 if (!non_stop)
3608 {
3609 /* If we were doing a step-over, all other threads but
3610 the stepping one had been paused in start_step_over,
3611 with their suspend counts incremented. We don't want
3612 to do a full unstop/unpause, because we're in
3613 all-stop mode (so we want threads stopped), but we
3614 still need to unsuspend the other threads, to
3615 decrement their `suspended' count back. */
3616 unsuspend_all_lwps (event_child);
3617 }
3618 else
3619 {
3620 /* If we just finished a step-over, then all threads had
3621 been momentarily paused. In all-stop, that's fine,
3622 we want threads stopped by now anyway. In non-stop,
3623 we need to re-resume threads that GDB wanted to be
3624 running. */
3625 unstop_all_lwps (1, event_child);
3626 }
3627 }
3628
3629 /* If we're not waiting for a specific LWP, choose an event LWP
3630 from among those that have had events. Giving equal priority
3631 to all LWPs that have had events helps prevent
3632 starvation. */
3633 if (ptid == minus_one_ptid)
3634 {
3635 event_child->status_pending_p = 1;
3636 event_child->status_pending = w;
3637
3638 select_event_lwp (&event_child);
3639
3640 /* current_thread and event_child must stay in sync. */
3641 current_thread = get_lwp_thread (event_child);
3642
3643 event_child->status_pending_p = 0;
3644 w = event_child->status_pending;
3645 }
3646
3647
3648 /* Stabilize threads (move out of jump pads). */
3649 if (!non_stop)
3650 target_stabilize_threads ();
3651 }
3652 else
3653 {
3654 /* If we just finished a step-over, then all threads had been
3655 momentarily paused. In all-stop, that's fine, we want
3656 threads stopped by now anyway. In non-stop, we need to
3657 re-resume threads that GDB wanted to be running. */
3658 if (step_over_finished)
3659 unstop_all_lwps (1, event_child);
3660 }
3661
3662 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3663 {
3664 /* If the reported event is an exit, fork, vfork or exec, let
3665 GDB know. */
3666
3667 /* Break the unreported fork relationship chain. */
3668 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3669 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3670 {
3671 event_child->fork_relative->fork_relative = NULL;
3672 event_child->fork_relative = NULL;
3673 }
3674
3675 *ourstatus = event_child->waitstatus;
3676 /* Clear the event lwp's waitstatus since we handled it already. */
3677 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3678 }
3679 else
3680 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3681
3682 /* Now that we've selected our final event LWP, un-adjust its PC if
3683 it was a software breakpoint, and the client doesn't know we can
3684 adjust the breakpoint ourselves. */
3685 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3686 && !cs.swbreak_feature)
3687 {
3688 int decr_pc = low_decr_pc_after_break ();
3689
3690 if (decr_pc != 0)
3691 {
3692 struct regcache *regcache
3693 = get_thread_regcache (current_thread, 1);
3694 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3695 }
3696 }
3697
3698 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3699 {
3700 get_syscall_trapinfo (event_child,
3701 &ourstatus->value.syscall_number);
3702 ourstatus->kind = event_child->syscall_state;
3703 }
3704 else if (current_thread->last_resume_kind == resume_stop
3705 && WSTOPSIG (w) == SIGSTOP)
3706 {
3707 /* A thread that has been requested to stop by GDB with vCont;t
3708 stopped cleanly, so report it as SIG0. The use of
3709 SIGSTOP is an implementation detail. */
3710 ourstatus->value.sig = GDB_SIGNAL_0;
3711 }
3712 else if (current_thread->last_resume_kind == resume_stop
3713 && WSTOPSIG (w) != SIGSTOP)
3714 {
3715 /* A thread that has been requested to stop by GDB with vCont;t,
3716 but it stopped for other reasons. */
3717 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3718 }
3719 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3720 {
3721 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3722 }
3723
3724 gdb_assert (step_over_bkpt == null_ptid);
3725
3726 if (debug_threads)
3727 {
3728 debug_printf ("wait_1 ret = %s, %d, %d\n",
3729 target_pid_to_str (ptid_of (current_thread)),
3730 ourstatus->kind, ourstatus->value.sig);
3731 debug_exit ();
3732 }
3733
3734 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3735 return filter_exit_event (event_child, ourstatus);
3736
3737 return ptid_of (current_thread);
3738 }
3739
3740 /* Get rid of any pending event in the pipe. */
3741 static void
3742 async_file_flush (void)
3743 {
3744 int ret;
3745 char buf;
3746
3747 do
3748 ret = read (linux_event_pipe[0], &buf, 1);
3749 while (ret >= 0 || (ret == -1 && errno == EINTR));
3750 }
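/* Editor's note: the drain loop above terminates only because the
   event pipe is made non-blocking when async mode is enabled;
   with a blocking descriptor the final read would hang instead of
   failing with EAGAIN.  */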
3751
3752 /* Put something in the pipe, so the event loop wakes up. */
3753 static void
3754 async_file_mark (void)
3755 {
3756 int ret;
3757
3758 async_file_flush ();
3759
3760 do
3761 ret = write (linux_event_pipe[1], "+", 1);
3762 while (ret == 0 || (ret == -1 && errno == EINTR));
3763
3764 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3765 be awakened anyway. */
3766 }
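/* Editor's sketch (illustrative only, not part of the original
   source): a minimal select-based loop showing how a reader of
   linux_event_pipe[0] is woken up by async_file_mark.  */
#if 0
#include <sys/select.h>

static void
example_wait_for_wakeup (void)
{
  fd_set readfds;

  FD_ZERO (&readfds);
  FD_SET (linux_event_pipe[0], &readfds);

  /* Blocks until async_file_mark writes its '+' token.  */
  if (select (linux_event_pipe[0] + 1, &readfds, NULL, NULL, NULL) > 0
      && FD_ISSET (linux_event_pipe[0], &readfds))
    {
      /* Drain the token(s); the actual target events are then
	 collected through the target's wait method.  */
      async_file_flush ();
    }
}
#endif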
3767
3768 ptid_t
3769 linux_process_target::wait (ptid_t ptid,
3770 target_waitstatus *ourstatus,
3771 int target_options)
3772 {
3773 ptid_t event_ptid;
3774
3775 /* Flush the async file first. */
3776 if (target_is_async_p ())
3777 async_file_flush ();
3778
3779 do
3780 {
3781 event_ptid = wait_1 (ptid, ourstatus, target_options);
3782 }
3783 while ((target_options & TARGET_WNOHANG) == 0
3784 && event_ptid == null_ptid
3785 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3786
3787 /* If at least one stop was reported, there may be more. A single
3788 SIGCHLD can signal more than one child stop. */
3789 if (target_is_async_p ()
3790 && (target_options & TARGET_WNOHANG) != 0
3791 && event_ptid != null_ptid)
3792 async_file_mark ();
3793
3794 return event_ptid;
3795 }
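/* Editor's note: with TARGET_WNOHANG set, the function above returns
   immediately, possibly with null_ptid / TARGET_WAITKIND_IGNORE when
   no event is ready; without it, the loop keeps calling wait_1 until
   an event arrives.  */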
3796
3797 /* Send a signal to an LWP. */
3798
3799 static int
3800 kill_lwp (unsigned long lwpid, int signo)
3801 {
3802 int ret;
3803
3804 errno = 0;
3805 ret = syscall (__NR_tkill, lwpid, signo);
3806 if (errno == ENOSYS)
3807 {
3808 /* If tkill fails, then we are not using nptl threads, a
3809 configuration we no longer support. */
3810 perror_with_name (("tkill"));
3811 }
3812 return ret;
3813 }
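/* Editor's note: unlike kill(2), which delivers to the whole thread
   group and lets the kernel pick a thread, tkill(2) directs the
   signal at one specific LWP.  A standalone sketch of an equivalent
   call (assuming <unistd.h> and <sys/syscall.h>):  */
#if 0
static int
example_kill_lwp (unsigned long lwpid, int signo)
{
  /* __NR_tkill takes a kernel thread id, not a process id.  */
  return syscall (__NR_tkill, lwpid, signo);
}
#endif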
3814
3815 void
3816 linux_stop_lwp (struct lwp_info *lwp)
3817 {
3818 send_sigstop (lwp);
3819 }
3820
3821 static void
3822 send_sigstop (struct lwp_info *lwp)
3823 {
3824 int pid;
3825
3826 pid = lwpid_of (get_lwp_thread (lwp));
3827
3828 /* If we already have a pending stop signal for this process, don't
3829 send another. */
3830 if (lwp->stop_expected)
3831 {
3832 if (debug_threads)
3833 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3834
3835 return;
3836 }
3837
3838 if (debug_threads)
3839 debug_printf ("Sending sigstop to lwp %d\n", pid);
3840
3841 lwp->stop_expected = 1;
3842 kill_lwp (pid, SIGSTOP);
3843 }
3844
3845 static void
3846 send_sigstop (thread_info *thread, lwp_info *except)
3847 {
3848 struct lwp_info *lwp = get_thread_lwp (thread);
3849
3850 /* Ignore EXCEPT. */
3851 if (lwp == except)
3852 return;
3853
3854 if (lwp->stopped)
3855 return;
3856
3857 send_sigstop (lwp);
3858 }
3859
3860 /* Increment the suspend count of an LWP, and stop it, if not stopped
3861 yet. */
3862 static void
3863 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3864 {
3865 struct lwp_info *lwp = get_thread_lwp (thread);
3866
3867 /* Ignore EXCEPT. */
3868 if (lwp == except)
3869 return;
3870
3871 lwp_suspended_inc (lwp);
3872
3873 send_sigstop (thread, except);
3874 }
3875
3876 static void
3877 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3878 {
3879 /* Store the exit status for later. */
3880 lwp->status_pending_p = 1;
3881 lwp->status_pending = wstat;
3882
3883 /* Store in waitstatus as well, as there's nothing else to process
3884 for this event. */
3885 if (WIFEXITED (wstat))
3886 {
3887 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3888 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3889 }
3890 else if (WIFSIGNALED (wstat))
3891 {
3892 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3893 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3894 }
3895
3896 /* Prevent trying to stop it. */
3897 lwp->stopped = 1;
3898
3899 /* No further stops are expected from a dead lwp. */
3900 lwp->stop_expected = 0;
3901 }
3902
3903 /* Return true if LWP has exited already, and has a pending exit event
3904 to report to GDB. */
3905
3906 static int
3907 lwp_is_marked_dead (struct lwp_info *lwp)
3908 {
3909 return (lwp->status_pending_p
3910 && (WIFEXITED (lwp->status_pending)
3911 || WIFSIGNALED (lwp->status_pending)));
3912 }
3913
3914 void
3915 linux_process_target::wait_for_sigstop ()
3916 {
3917 struct thread_info *saved_thread;
3918 ptid_t saved_tid;
3919 int wstat;
3920 int ret;
3921
3922 saved_thread = current_thread;
3923 if (saved_thread != NULL)
3924 saved_tid = saved_thread->id;
3925 else
3926 saved_tid = null_ptid; /* avoid bogus unused warning */
3927
3928 if (debug_threads)
3929 debug_printf ("wait_for_sigstop: pulling events\n");
3930
3931 /* Passing NULL_PTID as filter indicates we want all events to be
3932 left pending. Eventually this returns when there are no
3933 unwaited-for children left. */
3934 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3935 gdb_assert (ret == -1);
3936
3937 if (saved_thread == NULL || mythread_alive (saved_tid))
3938 current_thread = saved_thread;
3939 else
3940 {
3941 if (debug_threads)
3942 debug_printf ("Previously current thread died.\n");
3943
3944 /* We can't change the current inferior behind GDB's back,
3945 otherwise, a subsequent command may apply to the wrong
3946 process. */
3947 current_thread = NULL;
3948 }
3949 }
3950
3951 /* Returns true if THREAD is stopped in a jump pad, and we can't
3952 move it out, because we need to report the stop event to GDB. For
3953 example, if the user puts a breakpoint in the jump pad, it's
3954 because she wants to debug it. */
3955
3956 static bool
3957 stuck_in_jump_pad_callback (thread_info *thread)
3958 {
3959 struct lwp_info *lwp = get_thread_lwp (thread);
3960
3961 if (lwp->suspended != 0)
3962 {
3963 internal_error (__FILE__, __LINE__,
3964 "LWP %ld is suspended, suspended=%d\n",
3965 lwpid_of (thread), lwp->suspended);
3966 }
3967 gdb_assert (lwp->stopped);
3968
3969 /* Allow debugging the jump pad, gdb_collect, etc. */
3970 return (supports_fast_tracepoints ()
3971 && agent_loaded_p ()
3972 && (gdb_breakpoint_here (lwp->stop_pc)
3973 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3974 || thread->last_resume_kind == resume_step)
3975 && (linux_fast_tracepoint_collecting (lwp, NULL)
3976 != fast_tpoint_collect_result::not_collecting));
3977 }
3978
3979 void
3980 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3981 {
3982 struct thread_info *saved_thread;
3983 struct lwp_info *lwp = get_thread_lwp (thread);
3984 int *wstat;
3985
3986 if (lwp->suspended != 0)
3987 {
3988 internal_error (__FILE__, __LINE__,
3989 "LWP %ld is suspended, suspended=%d\n",
3990 lwpid_of (thread), lwp->suspended);
3991 }
3992 gdb_assert (lwp->stopped);
3993
3994 /* For gdb_breakpoint_here. */
3995 saved_thread = current_thread;
3996 current_thread = thread;
3997
3998 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3999
4000 /* Allow debugging the jump pad, gdb_collect, etc. */
4001 if (!gdb_breakpoint_here (lwp->stop_pc)
4002 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4003 && thread->last_resume_kind != resume_step
4004 && maybe_move_out_of_jump_pad (lwp, wstat))
4005 {
4006 if (debug_threads)
4007 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4008 lwpid_of (thread));
4009
4010 if (wstat)
4011 {
4012 lwp->status_pending_p = 0;
4013 enqueue_one_deferred_signal (lwp, wstat);
4014
4015 if (debug_threads)
4016 debug_printf ("Signal %d for LWP %ld deferred "
4017 "(in jump pad)\n",
4018 WSTOPSIG (*wstat), lwpid_of (thread));
4019 }
4020
4021 resume_one_lwp (lwp, 0, 0, NULL);
4022 }
4023 else
4024 lwp_suspended_inc (lwp);
4025
4026 current_thread = saved_thread;
4027 }
4028
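/* Return true if THREAD's LWP is alive (not marked dead) and not
   ptrace-stopped.  */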
4029 static bool
4030 lwp_running (thread_info *thread)
4031 {
4032 struct lwp_info *lwp = get_thread_lwp (thread);
4033
4034 if (lwp_is_marked_dead (lwp))
4035 return false;
4036
4037 return !lwp->stopped;
4038 }
4039
4040 void
4041 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4042 {
4043 /* Should not be called recursively. */
4044 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4045
4046 if (debug_threads)
4047 {
4048 debug_enter ();
4049 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4050 suspend ? "stop-and-suspend" : "stop",
4051 except != NULL
4052 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4053 : "none");
4054 }
4055
4056 stopping_threads = (suspend
4057 ? STOPPING_AND_SUSPENDING_THREADS
4058 : STOPPING_THREADS);
4059
4060 if (suspend)
4061 for_each_thread ([&] (thread_info *thread)
4062 {
4063 suspend_and_send_sigstop (thread, except);
4064 });
4065 else
4066 for_each_thread ([&] (thread_info *thread)
4067 {
4068 send_sigstop (thread, except);
4069 });
4070
4071 wait_for_sigstop ();
4072 stopping_threads = NOT_STOPPING_THREADS;
4073
4074 if (debug_threads)
4075 {
4076 debug_printf ("stop_all_lwps done, setting stopping_threads "
4077 "back to !stopping\n");
4078 debug_exit ();
4079 }
4080 }
4081
4082 /* Enqueue one signal in the chain of signals which need to be
4083 delivered to this process on next resume. */
4084
4085 static void
4086 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4087 {
4088 struct pending_signals *p_sig = XNEW (struct pending_signals);
4089
4090 p_sig->prev = lwp->pending_signals;
4091 p_sig->signal = signal;
4092 if (info == NULL)
4093 memset (&p_sig->info, 0, sizeof (siginfo_t));
4094 else
4095 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4096 lwp->pending_signals = p_sig;
4097 }
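/* Editor's note: the chain above is linked newest-first through the
   PREV pointers, but consumers (see resume_one_lwp_throw) walk to
   the tail before dequeuing, so signals are still delivered in FIFO
   order.  */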
4098
4099 void
4100 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4101 {
4102 struct thread_info *thread = get_lwp_thread (lwp);
4103 struct regcache *regcache = get_thread_regcache (thread, 1);
4104
4105 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4106
4107 current_thread = thread;
4108 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4109
4110 for (CORE_ADDR pc : next_pcs)
4111 set_single_step_breakpoint (pc, current_ptid);
4112 }
4113
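/* Prepare LWP to be single-stepped: return 1 to request a hardware
   single-step from ptrace, or 0 after installing software
   single-step breakpoints (0 is also returned when the target
   supports neither).  */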
4114 int
4115 linux_process_target::single_step (lwp_info* lwp)
4116 {
4117 int step = 0;
4118
4119 if (can_hardware_single_step ())
4120 {
4121 step = 1;
4122 }
4123 else if (supports_software_single_step ())
4124 {
4125 install_software_single_step_breakpoints (lwp);
4126 step = 0;
4127 }
4128 else
4129 {
4130 if (debug_threads)
4131 debug_printf ("stepping is not implemented on this target\n");
4132 }
4133
4134 return step;
4135 }
4136
4137 /* The signal can be delivered to the inferior if we are not trying to
4138 finish a fast tracepoint collect. Since a signal can be delivered
4139 during the step-over, the program may enter the signal handler and
4140 trap again after returning from it. We can live with these spurious
4141 double traps. */
4142
4143 static int
4144 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4145 {
4146 return (lwp->collecting_fast_tracepoint
4147 == fast_tpoint_collect_result::not_collecting);
4148 }
4149
4150 void
4151 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4152 int signal, siginfo_t *info)
4153 {
4154 struct thread_info *thread = get_lwp_thread (lwp);
4155 struct thread_info *saved_thread;
4156 int ptrace_request;
4157 struct process_info *proc = get_thread_process (thread);
4158
4159 /* Note that the target description may not be initialised
4160 (proc->tdesc == NULL) at this point, because the program hasn't
4161 stopped at its first instruction yet. This happens while GDBserver
4162 is skipping the extra traps from the wrapper program (see option
4163 --wrapper). Code in this function that requires register access
4164 should therefore be guarded by a check on proc->tdesc. */
4165
4166 if (lwp->stopped == 0)
4167 return;
4168
4169 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4170
4171 fast_tpoint_collect_result fast_tp_collecting
4172 = lwp->collecting_fast_tracepoint;
4173
4174 gdb_assert (!stabilizing_threads
4175 || (fast_tp_collecting
4176 != fast_tpoint_collect_result::not_collecting));
4177
4178 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4179 user used the "jump" command, or "set $pc = foo"). */
4180 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4181 {
4182 /* Collecting 'while-stepping' actions doesn't make sense
4183 anymore. */
4184 release_while_stepping_state_list (thread);
4185 }
4186
4187 /* If we have pending signals or status, and a new signal, enqueue the
4188 signal. Also enqueue the signal if it can't be delivered to the
4189 inferior right now. */
4190 if (signal != 0
4191 && (lwp->status_pending_p
4192 || lwp->pending_signals != NULL
4193 || !lwp_signal_can_be_delivered (lwp)))
4194 {
4195 enqueue_pending_signal (lwp, signal, info);
4196
4197 /* Postpone any pending signal. It was enqueued above. */
4198 signal = 0;
4199 }
4200
4201 if (lwp->status_pending_p)
4202 {
4203 if (debug_threads)
4204 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4205 " has pending status\n",
4206 lwpid_of (thread), step ? "step" : "continue",
4207 lwp->stop_expected ? "expected" : "not expected");
4208 return;
4209 }
4210
4211 saved_thread = current_thread;
4212 current_thread = thread;
4213
4214 /* This bit needs some thinking about. If we get a signal that
4215 we must report while a single-step reinsert is still pending,
4216 we often end up resuming the thread. It might be better to
4217 (ew) allow a stack of pending events; then we could be sure that
4218 the reinsert happened right away and not lose any signals.
4219
4220 Making this stack would also shrink the window in which breakpoints are
4221 uninserted (see comment in linux_wait_for_lwp) but not enough for
4222 complete correctness, so it won't solve that problem. It may be
4223 worthwhile just to solve this one, however. */
4224 if (lwp->bp_reinsert != 0)
4225 {
4226 if (debug_threads)
4227 debug_printf (" pending reinsert at 0x%s\n",
4228 paddress (lwp->bp_reinsert));
4229
4230 if (can_hardware_single_step ())
4231 {
4232 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4233 {
4234 if (step == 0)
4235 warning ("BAD - reinserting but not stepping.");
4236 if (lwp->suspended)
4237 warning ("BAD - reinserting and suspended(%d).",
4238 lwp->suspended);
4239 }
4240 }
4241
4242 step = maybe_hw_step (thread);
4243 }
4244
4245 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4246 {
4247 if (debug_threads)
4248 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4249 " (exit-jump-pad-bkpt)\n",
4250 lwpid_of (thread));
4251 }
4252 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4253 {
4254 if (debug_threads)
4255 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4256 " single-stepping\n",
4257 lwpid_of (thread));
4258
4259 if (can_hardware_single_step ())
4260 step = 1;
4261 else
4262 {
4263 internal_error (__FILE__, __LINE__,
4264 "moving out of jump pad single-stepping"
4265 " not implemented on this target");
4266 }
4267 }
4268
4269 /* If we have while-stepping actions in this thread, set it stepping.
4270 If we have a signal to deliver, it may or may not be set to
4271 SIG_IGN, we don't know. Assume so, and allow collecting
4272 while-stepping into a signal handler. A possible smart thing to
4273 do would be to set an internal breakpoint at the signal return
4274 address, continue, and carry on catching this while-stepping
4275 action only when that breakpoint is hit. A future
4276 enhancement. */
4277 if (thread->while_stepping != NULL)
4278 {
4279 if (debug_threads)
4280 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4281 lwpid_of (thread));
4282
4283 step = single_step (lwp);
4284 }
4285
4286 if (proc->tdesc != NULL && low_supports_breakpoints ())
4287 {
4288 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4289
4290 lwp->stop_pc = low_get_pc (regcache);
4291
4292 if (debug_threads)
4293 {
4294 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4295 (long) lwp->stop_pc);
4296 }
4297 }
4298
4299 /* If we have pending signals, consume one if it can be delivered to
4300 the inferior. */
4301 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4302 {
4303 struct pending_signals **p_sig;
4304
4305 p_sig = &lwp->pending_signals;
4306 while ((*p_sig)->prev != NULL)
4307 p_sig = &(*p_sig)->prev;
4308
4309 signal = (*p_sig)->signal;
4310 if ((*p_sig)->info.si_signo != 0)
4311 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4312 &(*p_sig)->info);
4313
4314 free (*p_sig);
4315 *p_sig = NULL;
4316 }
4317
4318 if (debug_threads)
4319 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4320 lwpid_of (thread), step ? "step" : "continue", signal,
4321 lwp->stop_expected ? "expected" : "not expected");
4322
4323 if (the_low_target.prepare_to_resume != NULL)
4324 the_low_target.prepare_to_resume (lwp);
4325
4326 regcache_invalidate_thread (thread);
4327 errno = 0;
4328 lwp->stepping = step;
4329 if (step)
4330 ptrace_request = PTRACE_SINGLESTEP;
4331 else if (gdb_catching_syscalls_p (lwp))
4332 ptrace_request = PTRACE_SYSCALL;
4333 else
4334 ptrace_request = PTRACE_CONT;
4335 ptrace (ptrace_request,
4336 lwpid_of (thread),
4337 (PTRACE_TYPE_ARG3) 0,
4338 /* Coerce to a uintptr_t first to avoid potential gcc warning
4339 of coercing an 8 byte integer to a 4 byte pointer. */
4340 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4341
4342 current_thread = saved_thread;
4343 if (errno)
4344 perror_with_name ("resuming thread");
4345
4346 /* Successfully resumed. Clear state that no longer makes sense,
4347 and mark the LWP as running. Must not do this before resuming
4348 otherwise if that fails other code will be confused. E.g., we'd
4349 later try to stop the LWP and hang forever waiting for a stop
4350 status. Note that we must not throw after this is cleared,
4351 otherwise handle_zombie_lwp_error would get confused. */
4352 lwp->stopped = 0;
4353 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4354 }
4355
4356 /* Called when we try to resume a stopped LWP and that errors out. If
4357 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4358 or about to become one), discard the error, clear any pending status
4359 the LWP may have, and return true (we'll collect the exit status
4360 soon enough). Otherwise, return false. */
4361
4362 static int
4363 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4364 {
4365 struct thread_info *thread = get_lwp_thread (lp);
4366
4367 /* If we get an error after resuming the LWP successfully, we'd
4368 confuse !T state for the LWP being gone. */
4369 gdb_assert (lp->stopped);
4370
4371 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4372 because even if ptrace failed with ESRCH, the tracee may be "not
4373 yet fully dead", but already refusing ptrace requests. In that
4374 case the tracee has 'R (Running)' state for a little bit
4375 (observed in Linux 3.18). See also the note on ESRCH in the
4376 ptrace(2) man page. Instead, check whether the LWP has any state
4377 other than ptrace-stopped. */
4378
4379 /* Don't assume anything if /proc/PID/status can't be read. */
4380 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4381 {
4382 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4383 lp->status_pending_p = 0;
4384 return 1;
4385 }
4386 return 0;
4387 }
4388
4389 void
4390 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4391 siginfo_t *info)
4392 {
4393 try
4394 {
4395 resume_one_lwp_throw (lwp, step, signal, info);
4396 }
4397 catch (const gdb_exception_error &ex)
4398 {
4399 if (!check_ptrace_stopped_lwp_gone (lwp))
4400 throw;
4401 }
4402 }
4403
4404 /* This function is called once per thread via for_each_thread.
4405 We look up which resume request applies to THREAD and mark it with a
4406 pointer to the appropriate resume request.
4407
4408 This algorithm is O(threads * resume elements), but the number of
4409 resume elements is small (and will remain small at least until GDB
4410 supports thread suspension). */
4411
4412 static void
4413 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4414 {
4415 struct lwp_info *lwp = get_thread_lwp (thread);
4416
4417 for (size_t ndx = 0; ndx < n; ndx++)
4418 {
4419 ptid_t ptid = resume[ndx].thread;
4420 if (ptid == minus_one_ptid
4421 || ptid == thread->id
4422 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4423 of PID'. */
4424 || (ptid.pid () == pid_of (thread)
4425 && (ptid.is_pid ()
4426 || ptid.lwp () == -1)))
4427 {
4428 if (resume[ndx].kind == resume_stop
4429 && thread->last_resume_kind == resume_stop)
4430 {
4431 if (debug_threads)
4432 debug_printf ("already %s LWP %ld at GDB's request\n",
4433 (thread->last_status.kind
4434 == TARGET_WAITKIND_STOPPED)
4435 ? "stopped"
4436 : "stopping",
4437 lwpid_of (thread));
4438
4439 continue;
4440 }
4441
4442 /* Ignore (wildcard) resume requests for already-resumed
4443 threads. */
4444 if (resume[ndx].kind != resume_stop
4445 && thread->last_resume_kind != resume_stop)
4446 {
4447 if (debug_threads)
4448 debug_printf ("already %s LWP %ld at GDB's request\n",
4449 (thread->last_resume_kind
4450 == resume_step)
4451 ? "stepping"
4452 : "continuing",
4453 lwpid_of (thread));
4454 continue;
4455 }
4456
4457 /* Don't let wildcard resumes resume fork children that GDB
4458 does not yet know are new fork children. */
4459 if (lwp->fork_relative != NULL)
4460 {
4461 struct lwp_info *rel = lwp->fork_relative;
4462
4463 if (rel->status_pending_p
4464 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4465 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4466 {
4467 if (debug_threads)
4468 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4469 lwpid_of (thread));
4470 continue;
4471 }
4472 }
4473
4474 /* If the thread has a pending event that has already been
4475 reported to GDBserver core, but GDB has not pulled the
4476 event out of the vStopped queue yet, likewise, ignore the
4477 (wildcard) resume request. */
4478 if (in_queued_stop_replies (thread->id))
4479 {
4480 if (debug_threads)
4481 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4482 lwpid_of (thread));
4483 continue;
4484 }
4485
4486 lwp->resume = &resume[ndx];
4487 thread->last_resume_kind = lwp->resume->kind;
4488
4489 lwp->step_range_start = lwp->resume->step_range_start;
4490 lwp->step_range_end = lwp->resume->step_range_end;
4491
4492 /* If we had a deferred signal to report, dequeue one now.
4493 This can happen if the LWP gets more than one signal while
4494 trying to get out of a jump pad. */
4495 if (lwp->stopped
4496 && !lwp->status_pending_p
4497 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4498 {
4499 lwp->status_pending_p = 1;
4500
4501 if (debug_threads)
4502 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4503 "leaving status pending.\n",
4504 WSTOPSIG (lwp->status_pending),
4505 lwpid_of (thread));
4506 }
4507
4508 return;
4509 }
4510 }
4511
4512 /* No resume action for this thread. */
4513 lwp->resume = NULL;
4514 }
4515
4516 bool
4517 linux_process_target::resume_status_pending (thread_info *thread)
4518 {
4519 struct lwp_info *lwp = get_thread_lwp (thread);
4520
4521 /* LWPs which will not be resumed are not interesting, because
4522 we might not wait for them next time through linux_wait. */
4523 if (lwp->resume == NULL)
4524 return false;
4525
4526 return thread_still_has_status_pending (thread);
4527 }
4528
4529 bool
4530 linux_process_target::thread_needs_step_over (thread_info *thread)
4531 {
4532 struct lwp_info *lwp = get_thread_lwp (thread);
4533 struct thread_info *saved_thread;
4534 CORE_ADDR pc;
4535 struct process_info *proc = get_thread_process (thread);
4536
4537 /* GDBserver is skipping the extra traps from the wrapper program,
4538 don't have to do step over. */
4539 if (proc->tdesc == NULL)
4540 return false;
4541
4542 /* LWPs which will not be resumed are not interesting, because we
4543 might not wait for them next time through linux_wait. */
4544
4545 if (!lwp->stopped)
4546 {
4547 if (debug_threads)
4548 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4549 lwpid_of (thread));
4550 return false;
4551 }
4552
4553 if (thread->last_resume_kind == resume_stop)
4554 {
4555 if (debug_threads)
4556 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4557 " stopped\n",
4558 lwpid_of (thread));
4559 return false;
4560 }
4561
4562 gdb_assert (lwp->suspended >= 0);
4563
4564 if (lwp->suspended)
4565 {
4566 if (debug_threads)
4567 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4568 lwpid_of (thread));
4569 return false;
4570 }
4571
4572 if (lwp->status_pending_p)
4573 {
4574 if (debug_threads)
4575 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4576 " status.\n",
4577 lwpid_of (thread));
4578 return false;
4579 }
4580
4581 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4582 or we have. */
4583 pc = get_pc (lwp);
4584
4585 /* If the PC has changed since we stopped, then don't do anything,
4586 and let the breakpoint/tracepoint be hit. This happens if, for
4587 instance, GDB handled the decr_pc_after_break subtraction itself,
4588 GDB is OOL stepping this thread, or the user has issued a "jump"
4589 command, or poked thread's registers herself. */
4590 if (pc != lwp->stop_pc)
4591 {
4592 if (debug_threads)
4593 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4594 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4595 lwpid_of (thread),
4596 paddress (lwp->stop_pc), paddress (pc));
4597 return false;
4598 }
4599
4600 /* On software single step target, resume the inferior with signal
4601 rather than stepping over. */
4602 if (supports_software_single_step ()
4603 && lwp->pending_signals != NULL
4604 && lwp_signal_can_be_delivered (lwp))
4605 {
4606 if (debug_threads)
4607 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4608 " signals.\n",
4609 lwpid_of (thread));
4610
4611 return false;
4612 }
4613
4614 saved_thread = current_thread;
4615 current_thread = thread;
4616
4617 /* We can only step over breakpoints we know about. */
4618 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4619 {
4620 /* Don't step over a breakpoint that GDB expects to hit,
4621 though. If the condition is being evaluated on the target's side
4622 and it evaluates to false, step over this breakpoint as well. */
4623 if (gdb_breakpoint_here (pc)
4624 && gdb_condition_true_at_breakpoint (pc)
4625 && gdb_no_commands_at_breakpoint (pc))
4626 {
4627 if (debug_threads)
4628 debug_printf ("Need step over [LWP %ld]? yes, but found"
4629 " GDB breakpoint at 0x%s; skipping step over\n",
4630 lwpid_of (thread), paddress (pc));
4631
4632 current_thread = saved_thread;
4633 return false;
4634 }
4635 else
4636 {
4637 if (debug_threads)
4638 debug_printf ("Need step over [LWP %ld]? yes, "
4639 "found breakpoint at 0x%s\n",
4640 lwpid_of (thread), paddress (pc));
4641
4642 /* We've found an lwp that needs stepping over; return true so
4643 that find_thread stops looking. */
4644 current_thread = saved_thread;
4645
4646 return true;
4647 }
4648 }
4649
4650 current_thread = saved_thread;
4651
4652 if (debug_threads)
4653 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4654 " at 0x%s\n",
4655 lwpid_of (thread), paddress (pc));
4656
4657 return false;
4658 }
4659
4660 void
4661 linux_process_target::start_step_over (lwp_info *lwp)
4662 {
4663 struct thread_info *thread = get_lwp_thread (lwp);
4664 struct thread_info *saved_thread;
4665 CORE_ADDR pc;
4666 int step;
4667
4668 if (debug_threads)
4669 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4670 lwpid_of (thread));
4671
4672 stop_all_lwps (1, lwp);
4673
4674 if (lwp->suspended != 0)
4675 {
4676 internal_error (__FILE__, __LINE__,
4677 "LWP %ld suspended=%d\n", lwpid_of (thread),
4678 lwp->suspended);
4679 }
4680
4681 if (debug_threads)
4682 debug_printf ("Done stopping all threads for step-over.\n");
4683
4684 /* Note, we should always reach here with an already adjusted PC,
4685 either by GDB (if we're resuming due to GDB's request), or by our
4686 caller, if we just finished handling an internal breakpoint GDB
4687 shouldn't care about. */
4688 pc = get_pc (lwp);
4689
4690 saved_thread = current_thread;
4691 current_thread = thread;
4692
4693 lwp->bp_reinsert = pc;
4694 uninsert_breakpoints_at (pc);
4695 uninsert_fast_tracepoint_jumps_at (pc);
4696
4697 step = single_step (lwp);
4698
4699 current_thread = saved_thread;
4700
4701 resume_one_lwp (lwp, step, 0, NULL);
4702
4703 /* Require next event from this LWP. */
4704 step_over_bkpt = thread->id;
4705 }
4706
4707 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4708 start_step_over, if still there, and delete any single-step
4709 breakpoints we've set, on targets without hardware single-step. */
4710
4711 static int
4712 finish_step_over (struct lwp_info *lwp)
4713 {
4714 if (lwp->bp_reinsert != 0)
4715 {
4716 struct thread_info *saved_thread = current_thread;
4717
4718 if (debug_threads)
4719 debug_printf ("Finished step over.\n");
4720
4721 current_thread = get_lwp_thread (lwp);
4722
4723 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4724 may be no breakpoint to reinsert there by now. */
4725 reinsert_breakpoints_at (lwp->bp_reinsert);
4726 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4727
4728 lwp->bp_reinsert = 0;
4729
4730 /* Delete any single-step breakpoints. No longer needed. We
4731 don't have to worry about other threads hitting this trap,
4732 and later not being able to explain it, because we were
4733 stepping over a breakpoint, and we hold all threads but
4734 LWP stopped while doing that. */
4735 if (!can_hardware_single_step ())
4736 {
4737 gdb_assert (has_single_step_breakpoints (current_thread));
4738 delete_single_step_breakpoints (current_thread);
4739 }
4740
4741 step_over_bkpt = null_ptid;
4742 current_thread = saved_thread;
4743 return 1;
4744 }
4745 else
4746 return 0;
4747 }
4748
4749 void
4750 linux_process_target::complete_ongoing_step_over ()
4751 {
4752 if (step_over_bkpt != null_ptid)
4753 {
4754 struct lwp_info *lwp;
4755 int wstat;
4756 int ret;
4757
4758 if (debug_threads)
4759 debug_printf ("detach: step over in progress, finish it first\n");
4760
4761 /* Passing NULL_PTID as filter indicates we want all events to
4762 be left pending. Eventually this returns when there are no
4763 unwaited-for children left. */
4764 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4765 __WALL);
4766 gdb_assert (ret == -1);
4767
4768 lwp = find_lwp_pid (step_over_bkpt);
4769 if (lwp != NULL)
4770 finish_step_over (lwp);
4771 step_over_bkpt = null_ptid;
4772 unsuspend_all_lwps (lwp);
4773 }
4774 }
4775
4776 void
4777 linux_process_target::resume_one_thread (thread_info *thread,
4778 bool leave_all_stopped)
4779 {
4780 struct lwp_info *lwp = get_thread_lwp (thread);
4781 int leave_pending;
4782
4783 if (lwp->resume == NULL)
4784 return;
4785
4786 if (lwp->resume->kind == resume_stop)
4787 {
4788 if (debug_threads)
4789 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4790
4791 if (!lwp->stopped)
4792 {
4793 if (debug_threads)
4794 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4795
4796 /* Stop the thread, and wait for the event asynchronously,
4797 through the event loop. */
4798 send_sigstop (lwp);
4799 }
4800 else
4801 {
4802 if (debug_threads)
4803 debug_printf ("already stopped LWP %ld\n",
4804 lwpid_of (thread));
4805
4806 /* The LWP may have been stopped in an internal event that
4807 was not meant to be notified back to GDB (e.g., gdbserver
4808 breakpoint), so we should be reporting a stop event in
4809 this case too. */
4810
4811 /* If the thread already has a pending SIGSTOP, this is a
4812 no-op. Otherwise, something later will presumably resume
4813 the thread and this will cause it to cancel any pending
4814 operation, due to last_resume_kind == resume_stop. If
4815 the thread already has a pending status to report, we
4816 will still report it the next time we wait - see
4817 status_pending_p_callback. */
4818
4819 /* If we already have a pending signal to report, then
4820 there's no need to queue a SIGSTOP, as this means we're
4821 midway through moving the LWP out of the jumppad, and we
4822 will report the pending signal as soon as that is
4823 finished. */
4824 if (lwp->pending_signals_to_report == NULL)
4825 send_sigstop (lwp);
4826 }
4827
4828 /* For stop requests, we're done. */
4829 lwp->resume = NULL;
4830 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4831 return;
4832 }
4833
4834 /* If the thread about to be resumed has a pending status, then
4835 don't resume it; we can just report the pending status. Likewise
4836 if it is suspended, because e.g., another thread is stepping past
4837 a breakpoint. Make sure to queue any signals that would otherwise
4838 be sent. In all-stop mode, we make this decision based on whether
4839 *any* thread has a pending status. If there's a thread that needs
4840 the step-over-breakpoint dance, then don't resume any other thread
4841 but that particular one. */
4842 leave_pending = (lwp->suspended
4843 || lwp->status_pending_p
4844 || leave_all_stopped);
4845
4846 /* If we have a new signal, enqueue the signal. */
4847 if (lwp->resume->sig != 0)
4848 {
4849 siginfo_t info, *info_p;
4850
4851 /* If this is the same signal we were previously stopped by,
4852 make sure to queue its siginfo. */
4853 if (WIFSTOPPED (lwp->last_status)
4854 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4855 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4856 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4857 info_p = &info;
4858 else
4859 info_p = NULL;
4860
4861 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4862 }
4863
4864 if (!leave_pending)
4865 {
4866 if (debug_threads)
4867 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4868
4869 proceed_one_lwp (thread, NULL);
4870 }
4871 else
4872 {
4873 if (debug_threads)
4874 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4875 }
4876
4877 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4878 lwp->resume = NULL;
4879 }
4880
4881 void
4882 linux_process_target::resume (thread_resume *resume_info, size_t n)
4883 {
4884 struct thread_info *need_step_over = NULL;
4885
4886 if (debug_threads)
4887 {
4888 debug_enter ();
4889 debug_printf ("linux_resume:\n");
4890 }
4891
4892 for_each_thread ([&] (thread_info *thread)
4893 {
4894 linux_set_resume_request (thread, resume_info, n);
4895 });
4896
4897 /* If there is a thread which would otherwise be resumed, which has
4898 a pending status, then don't resume any threads - we can just
4899 report the pending status. Make sure to queue any signals that
4900 would otherwise be sent. In non-stop mode, we'll apply this
4901 logic to each thread individually. We consume all pending events
4902 before considering starting a step-over (in all-stop). */
4903 bool any_pending = false;
4904 if (!non_stop)
4905 any_pending = find_thread ([this] (thread_info *thread)
4906 {
4907 return resume_status_pending (thread);
4908 }) != nullptr;
4909
4910 /* If there is a thread which would otherwise be resumed, which is
4911 stopped at a breakpoint that needs stepping over, then don't
4912 resume any threads - have it step over the breakpoint with all
4913 other threads stopped, then resume all threads again. Make sure
4914 to queue any signals that would otherwise be delivered or
4915 queued. */
4916 if (!any_pending && low_supports_breakpoints ())
4917 need_step_over = find_thread ([this] (thread_info *thread)
4918 {
4919 return thread_needs_step_over (thread);
4920 });
4921
4922 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4923
4924 if (debug_threads)
4925 {
4926 if (need_step_over != NULL)
4927 debug_printf ("Not resuming all, need step over\n");
4928 else if (any_pending)
4929 debug_printf ("Not resuming, all-stop and found "
4930 "an LWP with pending status\n");
4931 else
4932 debug_printf ("Resuming, no pending status or step over needed\n");
4933 }
4934
4935 /* Even if we're leaving threads stopped, queue all signals we'd
4936 otherwise deliver. */
4937 for_each_thread ([&] (thread_info *thread)
4938 {
4939 resume_one_thread (thread, leave_all_stopped);
4940 });
4941
4942 if (need_step_over)
4943 start_step_over (get_thread_lwp (need_step_over));
4944
4945 if (debug_threads)
4946 {
4947 debug_printf ("linux_resume done\n");
4948 debug_exit ();
4949 }
4950
4951 /* We may have events that were pending that can/should be sent to
4952 the client now. Trigger a linux_wait call. */
4953 if (target_is_async_p ())
4954 async_file_mark ();
4955 }
4956
4957 void
4958 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4959 {
4960 struct lwp_info *lwp = get_thread_lwp (thread);
4961 int step;
4962
4963 if (lwp == except)
4964 return;
4965
4966 if (debug_threads)
4967 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4968
4969 if (!lwp->stopped)
4970 {
4971 if (debug_threads)
4972 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4973 return;
4974 }
4975
4976 if (thread->last_resume_kind == resume_stop
4977 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4978 {
4979 if (debug_threads)
4980 debug_printf (" client wants LWP %ld to remain stopped\n",
4981 lwpid_of (thread));
4982 return;
4983 }
4984
4985 if (lwp->status_pending_p)
4986 {
4987 if (debug_threads)
4988 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4989 lwpid_of (thread));
4990 return;
4991 }
4992
4993 gdb_assert (lwp->suspended >= 0);
4994
4995 if (lwp->suspended)
4996 {
4997 if (debug_threads)
4998 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4999 return;
5000 }
5001
5002 if (thread->last_resume_kind == resume_stop
5003 && lwp->pending_signals_to_report == NULL
5004 && (lwp->collecting_fast_tracepoint
5005 == fast_tpoint_collect_result::not_collecting))
5006 {
5007 /* We haven't reported this LWP as stopped yet (otherwise, the
5008 last_status.kind check above would catch it, and we wouldn't
5009 reach here). This LWP may have been momentarily paused by a
5010 stop_all_lwps call while handling, for example, another LWP's
5011 step-over. In that case, the pending expected SIGSTOP signal
5012 that was queued at vCont;t handling time will have already
5013 been consumed by wait_for_sigstop, and so we need to requeue
5014 another one here. Note that if the LWP already has a SIGSTOP
5015 pending, this is a no-op. */
5016
5017 if (debug_threads)
5018 debug_printf ("Client wants LWP %ld to stop. "
5019 "Making sure it has a SIGSTOP pending\n",
5020 lwpid_of (thread));
5021
5022 send_sigstop (lwp);
5023 }
5024
5025 if (thread->last_resume_kind == resume_step)
5026 {
5027 if (debug_threads)
5028 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5029 lwpid_of (thread));
5030
5031 /* If resume_step is requested by GDB, install single-step
5032 breakpoints when the thread is about to be actually resumed if
5033 the single-step breakpoints weren't removed. */
5034 if (supports_software_single_step ()
5035 && !has_single_step_breakpoints (thread))
5036 install_software_single_step_breakpoints (lwp);
5037
5038 step = maybe_hw_step (thread);
5039 }
5040 else if (lwp->bp_reinsert != 0)
5041 {
5042 if (debug_threads)
5043 debug_printf (" stepping LWP %ld, reinsert set\n",
5044 lwpid_of (thread));
5045
5046 step = maybe_hw_step (thread);
5047 }
5048 else
5049 step = 0;
5050
5051 resume_one_lwp (lwp, step, 0, NULL);
5052 }
5053
5054 void
5055 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5056 lwp_info *except)
5057 {
5058 struct lwp_info *lwp = get_thread_lwp (thread);
5059
5060 if (lwp == except)
5061 return;
5062
5063 lwp_suspended_decr (lwp);
5064
5065 proceed_one_lwp (thread, except);
5066 }
5067
5068 void
5069 linux_process_target::proceed_all_lwps ()
5070 {
5071 struct thread_info *need_step_over;
5072
5073 /* If there is a thread which would otherwise be resumed, which is
5074 stopped at a breakpoint that needs stepping over, then don't
5075 resume any threads - have it step over the breakpoint with all
5076 other threads stopped, then resume all threads again. */
5077
5078 if (low_supports_breakpoints ())
5079 {
5080 need_step_over = find_thread ([this] (thread_info *thread)
5081 {
5082 return thread_needs_step_over (thread);
5083 });
5084
5085 if (need_step_over != NULL)
5086 {
5087 if (debug_threads)
5088 debug_printf ("proceed_all_lwps: found "
5089 "thread %ld needing a step-over\n",
5090 lwpid_of (need_step_over));
5091
5092 start_step_over (get_thread_lwp (need_step_over));
5093 return;
5094 }
5095 }
5096
5097 if (debug_threads)
5098 debug_printf ("Proceeding, no step-over needed\n");
5099
5100 for_each_thread ([this] (thread_info *thread)
5101 {
5102 proceed_one_lwp (thread, NULL);
5103 });
5104 }
5105
5106 void
5107 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5108 {
5109 if (debug_threads)
5110 {
5111 debug_enter ();
5112 if (except)
5113 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5114 lwpid_of (get_lwp_thread (except)));
5115 else
5116 debug_printf ("unstopping all lwps\n");
5117 }
5118
5119 if (unsuspend)
5120 for_each_thread ([&] (thread_info *thread)
5121 {
5122 unsuspend_and_proceed_one_lwp (thread, except);
5123 });
5124 else
5125 for_each_thread ([&] (thread_info *thread)
5126 {
5127 proceed_one_lwp (thread, except);
5128 });
5129
5130 if (debug_threads)
5131 {
5132 debug_printf ("unstop_all_lwps done\n");
5133 debug_exit ();
5134 }
5135 }
5136
5137
5138 #ifdef HAVE_LINUX_REGSETS
5139
5140 #define use_linux_regsets 1
5141
5142 /* Returns true if REGSET has been disabled. */
5143
5144 static int
5145 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5146 {
5147 return (info->disabled_regsets != NULL
5148 && info->disabled_regsets[regset - info->regsets]);
5149 }
5150
5151 /* Disable REGSET. */
5152
5153 static void
5154 disable_regset (struct regsets_info *info, struct regset_info *regset)
5155 {
5156 int dr_offset;
5157
5158 dr_offset = regset - info->regsets;
5159 if (info->disabled_regsets == NULL)
5160 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5161 info->disabled_regsets[dr_offset] = 1;
5162 }
5163
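/* Fetch registers covered by REGSETS_INFO into REGCACHE, one regset
   at a time.  Return 0 if a general-purpose regset was transferred,
   1 otherwise, so the caller knows it must fall back on fetching
   registers individually via PTRACE_PEEKUSER.  */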
5164 static int
5165 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5166 struct regcache *regcache)
5167 {
5168 struct regset_info *regset;
5169 int saw_general_regs = 0;
5170 int pid;
5171 struct iovec iov;
5172
5173 pid = lwpid_of (current_thread);
5174 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5175 {
5176 void *buf, *data;
5177 int nt_type, res;
5178
5179 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5180 continue;
5181
5182 buf = xmalloc (regset->size);
5183
5184 nt_type = regset->nt_type;
5185 if (nt_type)
5186 {
5187 iov.iov_base = buf;
5188 iov.iov_len = regset->size;
5189 data = (void *) &iov;
5190 }
5191 else
5192 data = buf;
5193
5194 #ifndef __sparc__
5195 res = ptrace (regset->get_request, pid,
5196 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5197 #else
5198 res = ptrace (regset->get_request, pid, data, nt_type);
5199 #endif
5200 if (res < 0)
5201 {
5202 if (errno == EIO
5203 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5204 {
5205 /* If we get EIO on a regset, or an EINVAL and the regset is
5206 optional, do not try it again for this process mode. */
5207 disable_regset (regsets_info, regset);
5208 }
5209 else if (errno == ENODATA)
5210 {
5211 /* ENODATA may be returned if the regset is currently
5212 not "active". This can happen in normal operation,
5213 so suppress the warning in this case. */
5214 }
5215 else if (errno == ESRCH)
5216 {
5217 /* At this point, ESRCH should mean the process is
5218 already gone, in which case we simply ignore attempts
5219 to read its registers. */
5220 }
5221 else
5222 {
5223 char s[256];
5224 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5225 pid);
5226 perror (s);
5227 }
5228 }
5229 else
5230 {
5231 if (regset->type == GENERAL_REGS)
5232 saw_general_regs = 1;
5233 regset->store_function (regcache, buf);
5234 }
5235 free (buf);
5236 }
5237 if (saw_general_regs)
5238 return 0;
5239 else
5240 return 1;
5241 }
5242
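/* Counterpart of regsets_fetch_inferior_registers: write REGCACHE
   back through each usable regset, first reading the live contents
   so that kernel fields not tracked in the regcache are preserved.
   Same return convention as the fetch variant.  */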
5243 static int
5244 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5245 struct regcache *regcache)
5246 {
5247 struct regset_info *regset;
5248 int saw_general_regs = 0;
5249 int pid;
5250 struct iovec iov;
5251
5252 pid = lwpid_of (current_thread);
5253 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5254 {
5255 void *buf, *data;
5256 int nt_type, res;
5257
5258 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5259 || regset->fill_function == NULL)
5260 continue;
5261
5262 buf = xmalloc (regset->size);
5263
5264 /* First fill the buffer with the current register set contents,
5265 in case there are any items in the kernel's regset that are
5266 not in gdbserver's regcache. */
5267
5268 nt_type = regset->nt_type;
5269 if (nt_type)
5270 {
5271 iov.iov_base = buf;
5272 iov.iov_len = regset->size;
5273 data = (void *) &iov;
5274 }
5275 else
5276 data = buf;
5277
5278 #ifndef __sparc__
5279 res = ptrace (regset->get_request, pid,
5280 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5281 #else
5282 res = ptrace (regset->get_request, pid, data, nt_type);
5283 #endif
5284
5285 if (res == 0)
5286 {
5287 /* Then overlay our cached registers on that. */
5288 regset->fill_function (regcache, buf);
5289
5290 /* Only now do we write the register set. */
5291 #ifndef __sparc__
5292 res = ptrace (regset->set_request, pid,
5293 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5294 #else
5295 res = ptrace (regset->set_request, pid, data, nt_type);
5296 #endif
5297 }
5298
5299 if (res < 0)
5300 {
5301 if (errno == EIO
5302 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5303 {
5304 /* If we get EIO on a regset, or an EINVAL and the regset is
5305 optional, do not try it again for this process mode. */
5306 disable_regset (regsets_info, regset);
5307 }
5308 else if (errno == ESRCH)
5309 {
5310 /* At this point, ESRCH should mean the process is
5311 already gone, in which case we simply ignore attempts
5312 to change its registers. See also the related
5313 comment in resume_one_lwp. */
5314 free (buf);
5315 return 0;
5316 }
5317 else
5318 {
5319 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5320 }
5321 }
5322 else if (regset->type == GENERAL_REGS)
5323 saw_general_regs = 1;
5324 free (buf);
5325 }
5326 if (saw_general_regs)
5327 return 0;
5328 else
5329 return 1;
5330 }
5331
5332 #else /* !HAVE_LINUX_REGSETS */
5333
5334 #define use_linux_regsets 0
5335 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5336 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5337
5338 #endif
5339
5340 /* Return 1 if register REGNO is supported by one of the regset ptrace
5341 calls or 0 if it has to be transferred individually. */
5342
5343 static int
5344 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5345 {
5346 unsigned char mask = 1 << (regno % 8);
5347 size_t index = regno / 8;
5348
5349 return (use_linux_regsets
5350 && (regs_info->regset_bitmap == NULL
5351 || (regs_info->regset_bitmap[index] & mask) != 0));
5352 }
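/* Editor's example: for REGNO 11, the test above checks bit 3
   (mask 0x08) of regset_bitmap[1]; a NULL bitmap means every
   register is covered by some regset.  */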
5353
5354 #ifdef HAVE_LINUX_USRREGS
5355
5356 static int
5357 register_addr (const struct usrregs_info *usrregs, int regnum)
5358 {
5359 int addr;
5360
5361 if (regnum < 0 || regnum >= usrregs->num_regs)
5362 error ("Invalid register number %d.", regnum);
5363
5364 addr = usrregs->regmap[regnum];
5365
5366 return addr;
5367 }
5368
5369
5370 void
5371 linux_process_target::fetch_register (const usrregs_info *usrregs,
5372 regcache *regcache, int regno)
5373 {
5374 CORE_ADDR regaddr;
5375 int i, size;
5376 char *buf;
5377 int pid;
5378
5379 if (regno >= usrregs->num_regs)
5380 return;
5381 if (low_cannot_fetch_register (regno))
5382 return;
5383
5384 regaddr = register_addr (usrregs, regno);
5385 if (regaddr == -1)
5386 return;
5387
5388 size = ((register_size (regcache->tdesc, regno)
5389 + sizeof (PTRACE_XFER_TYPE) - 1)
5390 & -sizeof (PTRACE_XFER_TYPE));
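/* E.g., a 10-byte register with an 8-byte PTRACE_XFER_TYPE rounds
   up to 16, so the transfer loop below always moves whole words.  */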
5391 buf = (char *) alloca (size);
5392
5393 pid = lwpid_of (current_thread);
5394 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5395 {
5396 errno = 0;
5397 *(PTRACE_XFER_TYPE *) (buf + i) =
5398 ptrace (PTRACE_PEEKUSER, pid,
5399 /* Coerce to a uintptr_t first to avoid potential gcc warning
5400 of coercing an 8 byte integer to a 4 byte pointer. */
5401 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5402 regaddr += sizeof (PTRACE_XFER_TYPE);
5403 if (errno != 0)
5404 {
5405 /* Mark register REGNO unavailable. */
5406 supply_register (regcache, regno, NULL);
5407 return;
5408 }
5409 }
5410
5411 low_supply_ptrace_register (regcache, regno, buf);
5412 }
5413
5414 void
5415 linux_process_target::store_register (const usrregs_info *usrregs,
5416 regcache *regcache, int regno)
5417 {
5418 CORE_ADDR regaddr;
5419 int i, size;
5420 char *buf;
5421 int pid;
5422
5423 if (regno >= usrregs->num_regs)
5424 return;
5425 if (low_cannot_store_register (regno))
5426 return;
5427
5428 regaddr = register_addr (usrregs, regno);
5429 if (regaddr == -1)
5430 return;
5431
5432 size = ((register_size (regcache->tdesc, regno)
5433 + sizeof (PTRACE_XFER_TYPE) - 1)
5434 & -sizeof (PTRACE_XFER_TYPE));
5435 buf = (char *) alloca (size);
5436 memset (buf, 0, size);
5437
5438 low_collect_ptrace_register (regcache, regno, buf);
5439
5440 pid = lwpid_of (current_thread);
5441 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5442 {
5443 errno = 0;
5444 ptrace (PTRACE_POKEUSER, pid,
5445 /* Coerce to a uintptr_t first to avoid potential gcc warning
5446 about coercing an 8 byte integer to a 4 byte pointer. */
5447 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5448 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5449 if (errno != 0)
5450 {
5451 /* At this point, ESRCH should mean the process is
5452 already gone, in which case we simply ignore attempts
5453 to change its registers. See also the related
5454 comment in resume_one_lwp. */
5455 if (errno == ESRCH)
5456 return;
5457
5458
5459 if (!low_cannot_store_register (regno))
5460 error ("writing register %d: %s", regno, safe_strerror (errno));
5461 }
5462 regaddr += sizeof (PTRACE_XFER_TYPE);
5463 }
5464 }
5465 #endif /* HAVE_LINUX_USRREGS */
5466
5467 void
5468 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5469 int regno, char *buf)
5470 {
5471 collect_register (regcache, regno, buf);
5472 }
5473
5474 void
5475 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5476 int regno, const char *buf)
5477 {
5478 supply_register (regcache, regno, buf);
5479 }
5480
5481 void
5482 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5483 regcache *regcache,
5484 int regno, int all)
5485 {
5486 #ifdef HAVE_LINUX_USRREGS
5487 struct usrregs_info *usr = regs_info->usrregs;
5488
5489 if (regno == -1)
5490 {
5491 for (regno = 0; regno < usr->num_regs; regno++)
5492 if (all || !linux_register_in_regsets (regs_info, regno))
5493 fetch_register (usr, regcache, regno);
5494 }
5495 else
5496 fetch_register (usr, regcache, regno);
5497 #endif
5498 }
5499
5500 void
5501 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5502 regcache *regcache,
5503 int regno, int all)
5504 {
5505 #ifdef HAVE_LINUX_USRREGS
5506 struct usrregs_info *usr = regs_info->usrregs;
5507
5508 if (regno == -1)
5509 {
5510 for (regno = 0; regno < usr->num_regs; regno++)
5511 if (all || !linux_register_in_regsets (regs_info, regno))
5512 store_register (usr, regcache, regno);
5513 }
5514 else
5515 store_register (usr, regcache, regno);
5516 #endif
5517 }
5518
5519 void
5520 linux_process_target::fetch_registers (regcache *regcache, int regno)
5521 {
5522 int use_regsets;
5523 int all = 0;
5524 const regs_info *regs_info = get_regs_info ();
5525
5526 if (regno == -1)
5527 {
5528 if (regs_info->usrregs != NULL)
5529 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5530 low_fetch_register (regcache, regno);
5531
5532 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5533 if (regs_info->usrregs != NULL)
5534 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5535 }
5536 else
5537 {
5538 if (low_fetch_register (regcache, regno))
5539 return;
5540
5541 use_regsets = linux_register_in_regsets (regs_info, regno);
5542 if (use_regsets)
5543 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5544 regcache);
5545 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5546 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5547 }
5548 }
5549
5550 void
5551 linux_process_target::store_registers (regcache *regcache, int regno)
5552 {
5553 int use_regsets;
5554 int all = 0;
5555 const regs_info *regs_info = get_regs_info ();
5556
5557 if (regno == -1)
5558 {
5559 all = regsets_store_inferior_registers (regs_info->regsets_info,
5560 regcache);
5561 if (regs_info->usrregs != NULL)
5562 usr_store_inferior_registers (regs_info, regcache, regno, all);
5563 }
5564 else
5565 {
5566 use_regsets = linux_register_in_regsets (regs_info, regno);
5567 if (use_regsets)
5568 all = regsets_store_inferior_registers (regs_info->regsets_info,
5569 regcache);
5570 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5571 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5572 }
5573 }
5574
5575 bool
5576 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5577 {
5578 return false;
5579 }
5580
5581 /* A wrapper for the read_memory target op. */
5582
5583 static int
5584 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5585 {
5586 return the_target->read_memory (memaddr, myaddr, len);
5587 }
5588
5589 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5590 to debugger memory starting at MYADDR. */
5591
5592 int
5593 linux_process_target::read_memory (CORE_ADDR memaddr,
5594 unsigned char *myaddr, int len)
5595 {
5596 int pid = lwpid_of (current_thread);
5597 PTRACE_XFER_TYPE *buffer;
5598 CORE_ADDR addr;
5599 int count;
5600 char filename[64];
5601 int i;
5602 int ret;
5603 int fd;
5604
5605 /* Try using /proc. Don't bother for one word. */
5606 if (len >= 3 * sizeof (long))
5607 {
5608 int bytes;
5609
5610 /* We could keep this file open and cache it - possibly one per
5611 thread. That requires some juggling, but is even faster. */
5612 sprintf (filename, "/proc/%d/mem", pid);
5613 fd = open (filename, O_RDONLY | O_LARGEFILE);
5614 if (fd == -1)
5615 goto no_proc;
5616
5617 /* If pread64 is available, use it. It's faster if the kernel
5618 supports it (only one syscall), and it's 64-bit safe even on
5619 32-bit platforms (for instance, SPARC debugging a SPARC64
5620 application). */
5621 #ifdef HAVE_PREAD64
5622 bytes = pread64 (fd, myaddr, len, memaddr);
5623 #else
5624 bytes = -1;
5625 if (lseek (fd, memaddr, SEEK_SET) != -1)
5626 bytes = read (fd, myaddr, len);
5627 #endif
5628
5629 close (fd);
5630 if (bytes == len)
5631 return 0;
5632
5633 /* Some data was read, we'll try to get the rest with ptrace. */
5634 if (bytes > 0)
5635 {
5636 memaddr += bytes;
5637 myaddr += bytes;
5638 len -= bytes;
5639 }
5640 }
5641
5642 no_proc:
5643 /* Round starting address down to longword boundary. */
5644 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5645 /* Round ending address up; get number of longwords that makes. */
5646 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5647 / sizeof (PTRACE_XFER_TYPE));
5648 /* Allocate buffer of that many longwords. */
5649 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5650
5651 /* Read all the longwords.  */
5652 errno = 0;
5653 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5654 {
5655 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5656 about coercing an 8 byte integer to a 4 byte pointer. */
5657 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5658 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5659 (PTRACE_TYPE_ARG4) 0);
5660 if (errno)
5661 break;
5662 }
5663 ret = errno;
5664
5665 /* Copy appropriate bytes out of the buffer. */
5666 if (i > 0)
5667 {
5668 i *= sizeof (PTRACE_XFER_TYPE);
5669 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5670 memcpy (myaddr,
5671 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5672 i < len ? i : len);
5673 }
5674
5675 return ret;
5676 }
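/* A worked example of the alignment arithmetic above, assuming an
   8-byte PTRACE_XFER_TYPE (illustrative values):

     memaddr = 0x1003, len = 10
     addr    = 0x1003 rounded down = 0x1000
     count   = ((0x100d - 0x1000) + 8 - 1) / 8 = 2

   Two longwords covering 0x1000..0x100f are peeked, and the ten
   requested bytes are copied out of the buffer starting at offset
   memaddr & 7 == 3.  */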
5677
5678 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5679 memory at MEMADDR. On failure (cannot write to the inferior)
5680 returns the value of errno. Always succeeds if LEN is zero. */
5681
5682 int
5683 linux_process_target::write_memory (CORE_ADDR memaddr,
5684 const unsigned char *myaddr, int len)
5685 {
5686 int i;
5687 /* Round starting address down to longword boundary. */
5688 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5689 /* Round ending address up; get number of longwords that makes. */
5690 int count
5691 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5692 / sizeof (PTRACE_XFER_TYPE);
5693
5694 /* Allocate buffer of that many longwords. */
5695 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5696
5697 int pid = lwpid_of (current_thread);
5698
5699 if (len == 0)
5700 {
5701 /* Zero length write always succeeds. */
5702 return 0;
5703 }
5704
5705 if (debug_threads)
5706 {
5707 /* Dump up to four bytes. */
5708 char str[4 * 2 + 1];
5709 char *p = str;
5710 int dump = len < 4 ? len : 4;
5711
5712 for (i = 0; i < dump; i++)
5713 {
5714 sprintf (p, "%02x", myaddr[i]);
5715 p += 2;
5716 }
5717 *p = '\0';
5718
5719 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5720 str, (long) memaddr, pid);
5721 }
5722
5723 /* Fill start and end extra bytes of buffer with existing memory data. */
5724
5725 errno = 0;
5726 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5727 about coercing an 8 byte integer to a 4 byte pointer. */
5728 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5729 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5730 (PTRACE_TYPE_ARG4) 0);
5731 if (errno)
5732 return errno;
5733
5734 if (count > 1)
5735 {
5736 errno = 0;
5737 buffer[count - 1]
5738 = ptrace (PTRACE_PEEKTEXT, pid,
5739 /* Coerce to a uintptr_t first to avoid potential gcc warning
5740 about coercing an 8 byte integer to a 4 byte pointer. */
5741 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5742 * sizeof (PTRACE_XFER_TYPE)),
5743 (PTRACE_TYPE_ARG4) 0);
5744 if (errno)
5745 return errno;
5746 }
5747
5748 /* Copy data to be written over corresponding part of buffer. */
5749
5750 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5751 myaddr, len);
5752
5753 /* Write the entire buffer. */
5754
5755 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5756 {
5757 errno = 0;
5758 ptrace (PTRACE_POKETEXT, pid,
5759 /* Coerce to a uintptr_t first to avoid potential gcc warning
5760 about coercing an 8 byte integer to a 4 byte pointer. */
5761 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5762 (PTRACE_TYPE_ARG4) buffer[i]);
5763 if (errno)
5764 return errno;
5765 }
5766
5767 return 0;
5768 }
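/* The two peeks above make this a read-modify-write at the edges of
   the range.  A worked example, again assuming an 8-byte
   PTRACE_XFER_TYPE (illustrative values): writing len == 4 bytes at
   memaddr == 0x1006 spans two longwords (0x1000..0x100f), so both
   words are peeked, the four payload bytes are spliced in at offset
   6, and both words are poked back.  Bytes 0x1000..0x1005 and
   0x100a..0x100f keep their previous contents.  */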
5769
5770 void
5771 linux_process_target::look_up_symbols ()
5772 {
5773 #ifdef USE_THREAD_DB
5774 struct process_info *proc = current_process ();
5775
5776 if (proc->priv->thread_db != NULL)
5777 return;
5778
5779 thread_db_init ();
5780 #endif
5781 }
5782
5783 void
5784 linux_process_target::request_interrupt ()
5785 {
5786 /* Send a SIGINT to the process group. This acts just like the user
5787 typed a ^C on the controlling terminal. */
5788 ::kill (-signal_pid, SIGINT);
5789 }
5790
5791 bool
5792 linux_process_target::supports_read_auxv ()
5793 {
5794 return true;
5795 }
5796
5797 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5798 to debugger memory starting at MYADDR. */
5799
5800 int
5801 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5802 unsigned int len)
5803 {
5804 char filename[PATH_MAX];
5805 int fd, n;
5806 int pid = lwpid_of (current_thread);
5807
5808 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5809
5810 fd = open (filename, O_RDONLY);
5811 if (fd < 0)
5812 return -1;
5813
5814 if (offset != (CORE_ADDR) 0
5815 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5816 n = -1;
5817 else
5818 n = read (fd, myaddr, len);
5819
5820 close (fd);
5821
5822 return n;
5823 }
5824
5825 int
5826 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5827 int size, raw_breakpoint *bp)
5828 {
5829 if (type == raw_bkpt_type_sw)
5830 return insert_memory_breakpoint (bp);
5831 else
5832 return low_insert_point (type, addr, size, bp);
5833 }
5834
5835 int
5836 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5837 int size, raw_breakpoint *bp)
5838 {
5839 /* Unsupported (see target.h). */
5840 return 1;
5841 }
5842
5843 int
5844 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5845 int size, raw_breakpoint *bp)
5846 {
5847 if (type == raw_bkpt_type_sw)
5848 return remove_memory_breakpoint (bp);
5849 else
5850 return low_remove_point (type, addr, size, bp);
5851 }
5852
5853 int
5854 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5855 int size, raw_breakpoint *bp)
5856 {
5857 /* Unsupported (see target.h). */
5858 return 1;
5859 }
5860
5861 /* Implement the stopped_by_sw_breakpoint target_ops
5862 method. */
5863
5864 bool
5865 linux_process_target::stopped_by_sw_breakpoint ()
5866 {
5867 struct lwp_info *lwp = get_thread_lwp (current_thread);
5868
5869 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5870 }
5871
5872 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5873 method. */
5874
5875 bool
5876 linux_process_target::supports_stopped_by_sw_breakpoint ()
5877 {
5878 return USE_SIGTRAP_SIGINFO;
5879 }
5880
5881 /* Implement the stopped_by_hw_breakpoint target_ops
5882 method. */
5883
5884 bool
5885 linux_process_target::stopped_by_hw_breakpoint ()
5886 {
5887 struct lwp_info *lwp = get_thread_lwp (current_thread);
5888
5889 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5890 }
5891
5892 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5893 method. */
5894
5895 bool
5896 linux_process_target::supports_stopped_by_hw_breakpoint ()
5897 {
5898 return USE_SIGTRAP_SIGINFO;
5899 }
5900
5901 /* Implement the supports_hardware_single_step target_ops method. */
5902
5903 bool
5904 linux_process_target::supports_hardware_single_step ()
5905 {
5906 return can_hardware_single_step ();
5907 }
5908
5909 bool
5910 linux_process_target::stopped_by_watchpoint ()
5911 {
5912 struct lwp_info *lwp = get_thread_lwp (current_thread);
5913
5914 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5915 }
5916
5917 CORE_ADDR
5918 linux_process_target::stopped_data_address ()
5919 {
5920 struct lwp_info *lwp = get_thread_lwp (current_thread);
5921
5922 return lwp->stopped_data_address;
5923 }
5924
5925 /* This is only used for targets that define PT_TEXT_ADDR,
5926 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5927 target presumably has other ways of acquiring this information,
5928 such as loadmaps. */
5929
5930 bool
5931 linux_process_target::supports_read_offsets ()
5932 {
5933 #ifdef SUPPORTS_READ_OFFSETS
5934 return true;
5935 #else
5936 return false;
5937 #endif
5938 }
5939
5940 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5941 to tell gdb about. */
5942
5943 int
5944 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5945 {
5946 #ifdef SUPPORTS_READ_OFFSETS
5947 unsigned long text, text_end, data;
5948 int pid = lwpid_of (current_thread);
5949
5950 errno = 0;
5951
5952 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5953 (PTRACE_TYPE_ARG4) 0);
5954 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5955 (PTRACE_TYPE_ARG4) 0);
5956 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5957 (PTRACE_TYPE_ARG4) 0);
5958
5959 if (errno == 0)
5960 {
5961 /* Both text and data offsets produced at compile-time (and so
5962 used by gdb) are relative to the beginning of the program,
5963 with the data segment immediately following the text segment.
5964 However, the actual runtime layout in memory may put the data
5965 somewhere else, so when we send gdb a data base-address, we
5966 use the real data base address and subtract the compile-time
5967 data base-address from it (which is just the length of the
5968 text segment). BSS immediately follows data in both
5969 cases. */
5970 *text_p = text;
5971 *data_p = data - (text_end - text);
5972
5973 return 1;
5974 }
5975 return 0;
5976 #else
5977 gdb_assert_not_reached ("target op read_offsets not supported");
5978 #endif
5979 }
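/* A worked example of the bias computation above (illustrative
   numbers): if the compile-time layout places text at offset 0 with
   a 0x2000-byte text segment, and at run time text is mapped at
   0x40000000 and data at 0x50000000, then

     *text_p = 0x40000000
     *data_p = 0x50000000 - (text_end - text)
             = 0x50000000 - 0x2000
             = 0x4fffe000

   so adding a compile-time data offset (which starts at 0x2000) to
   *data_p yields the real runtime address.  */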
5980
5981 bool
5982 linux_process_target::supports_get_tls_address ()
5983 {
5984 #ifdef USE_THREAD_DB
5985 return true;
5986 #else
5987 return false;
5988 #endif
5989 }
5990
5991 int
5992 linux_process_target::get_tls_address (thread_info *thread,
5993 CORE_ADDR offset,
5994 CORE_ADDR load_module,
5995 CORE_ADDR *address)
5996 {
5997 #ifdef USE_THREAD_DB
5998 return thread_db_get_tls_address (thread, offset, load_module, address);
5999 #else
6000 return -1;
6001 #endif
6002 }
6003
6004 bool
6005 linux_process_target::supports_qxfer_osdata ()
6006 {
6007 return true;
6008 }
6009
6010 int
6011 linux_process_target::qxfer_osdata (const char *annex,
6012 unsigned char *readbuf,
6013 unsigned const char *writebuf,
6014 CORE_ADDR offset, int len)
6015 {
6016 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6017 }
6018
6019 void
6020 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
6021 gdb_byte *inf_siginfo, int direction)
6022 {
6023 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
6024
6025 /* If there was no callback, or the callback didn't do anything,
6026 then just do a straight memcpy. */
6027 if (!done)
6028 {
6029 if (direction == 1)
6030 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6031 else
6032 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6033 }
6034 }
6035
6036 bool
6037 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
6038 int direction)
6039 {
6040 return false;
6041 }
6042
6043 bool
6044 linux_process_target::supports_qxfer_siginfo ()
6045 {
6046 return true;
6047 }
6048
6049 int
6050 linux_process_target::qxfer_siginfo (const char *annex,
6051 unsigned char *readbuf,
6052 unsigned const char *writebuf,
6053 CORE_ADDR offset, int len)
6054 {
6055 int pid;
6056 siginfo_t siginfo;
6057 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6058
6059 if (current_thread == NULL)
6060 return -1;
6061
6062 pid = lwpid_of (current_thread);
6063
6064 if (debug_threads)
6065 debug_printf ("%s siginfo for lwp %d.\n",
6066 readbuf != NULL ? "Reading" : "Writing",
6067 pid);
6068
6069 if (offset >= sizeof (siginfo))
6070 return -1;
6071
6072 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6073 return -1;
6074
6075 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6076 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6077 inferior with a 64-bit GDBSERVER should look the same as debugging it
6078 with a 32-bit GDBSERVER, we need to convert it. */
6079 siginfo_fixup (&siginfo, inf_siginfo, 0);
6080
6081 if (offset + len > sizeof (siginfo))
6082 len = sizeof (siginfo) - offset;
6083
6084 if (readbuf != NULL)
6085 memcpy (readbuf, inf_siginfo + offset, len);
6086 else
6087 {
6088 memcpy (inf_siginfo + offset, writebuf, len);
6089
6090 /* Convert back to ptrace layout before flushing it out. */
6091 siginfo_fixup (&siginfo, inf_siginfo, 1);
6092
6093 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6094 return -1;
6095 }
6096
6097 return len;
6098 }
6099
6100 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6101 it lets us notice when children change state; and it acts as the
6102 handler for the sigsuspend in my_waitpid. */
6103
6104 static void
6105 sigchld_handler (int signo)
6106 {
6107 int old_errno = errno;
6108
6109 if (debug_threads)
6110 {
6111 do
6112 {
6113 /* Use the async-signal-safe debug function. */
6114 if (debug_write ("sigchld_handler\n",
6115 sizeof ("sigchld_handler\n") - 1) < 0)
6116 break; /* just ignore */
6117 } while (0);
6118 }
6119
6120 if (target_is_async_p ())
6121 async_file_mark (); /* trigger a linux_wait */
6122
6123 errno = old_errno;
6124 }
6125
6126 bool
6127 linux_process_target::supports_non_stop ()
6128 {
6129 return true;
6130 }
6131
6132 bool
6133 linux_process_target::async (bool enable)
6134 {
6135 bool previous = target_is_async_p ();
6136
6137 if (debug_threads)
6138 debug_printf ("linux_async (%d), previous=%d\n",
6139 enable, previous);
6140
6141 if (previous != enable)
6142 {
6143 sigset_t mask;
6144 sigemptyset (&mask);
6145 sigaddset (&mask, SIGCHLD);
6146
6147 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6148
6149 if (enable)
6150 {
6151 if (pipe (linux_event_pipe) == -1)
6152 {
6153 linux_event_pipe[0] = -1;
6154 linux_event_pipe[1] = -1;
6155 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6156
6157 warning ("creating event pipe failed.");
6158 return previous;
6159 }
6160
6161 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6162 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6163
6164 /* Register the event loop handler. */
6165 add_file_handler (linux_event_pipe[0],
6166 handle_target_event, NULL);
6167
6168 /* Always trigger a linux_wait. */
6169 async_file_mark ();
6170 }
6171 else
6172 {
6173 delete_file_handler (linux_event_pipe[0]);
6174
6175 close (linux_event_pipe[0]);
6176 close (linux_event_pipe[1]);
6177 linux_event_pipe[0] = -1;
6178 linux_event_pipe[1] = -1;
6179 }
6180
6181 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6182 }
6183
6184 return previous;
6185 }
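/* The pipe managed above is the classic self-pipe pattern: a signal
   handler may only call async-signal-safe functions, so instead of
   handling the event it writes a byte that the event loop's file
   handler will see.  A minimal standalone sketch (the names here are
   illustrative, not the ones used in this file):

     static int event_pipe[2];   // created with pipe (), O_NONBLOCK

     static void
     wake_event_loop (void)      // safe to call from a signal handler
     {
       char c = 0;
       (void) write (event_pipe[1], &c, 1);
     }

     static void
     drain_events (void)         // called from the event loop
     {
       char c;
       while (read (event_pipe[0], &c, 1) > 0)
         continue;
     }
  */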
6186
6187 int
6188 linux_process_target::start_non_stop (bool nonstop)
6189 {
6190 /* Register or unregister from event-loop accordingly. */
6191 target_async (nonstop);
6192
6193 if (target_is_async_p () != (nonstop != false))
6194 return -1;
6195
6196 return 0;
6197 }
6198
6199 bool
6200 linux_process_target::supports_multi_process ()
6201 {
6202 return true;
6203 }
6204
6205 /* Check if fork events are supported. */
6206
6207 bool
6208 linux_process_target::supports_fork_events ()
6209 {
6210 return linux_supports_tracefork ();
6211 }
6212
6213 /* Check if vfork events are supported. */
6214
6215 bool
6216 linux_process_target::supports_vfork_events ()
6217 {
6218 return linux_supports_tracefork ();
6219 }
6220
6221 /* Check if exec events are supported. */
6222
6223 bool
6224 linux_process_target::supports_exec_events ()
6225 {
6226 return linux_supports_traceexec ();
6227 }
6228
6229 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6230 ptrace flags for all inferiors. This is in case the new GDB connection
6231 doesn't support the same set of events that the previous one did. */
6232
6233 void
6234 linux_process_target::handle_new_gdb_connection ()
6235 {
6236 /* Request that all the lwps reset their ptrace options. */
6237 for_each_thread ([] (thread_info *thread)
6238 {
6239 struct lwp_info *lwp = get_thread_lwp (thread);
6240
6241 if (!lwp->stopped)
6242 {
6243 /* Stop the lwp so we can modify its ptrace options. */
6244 lwp->must_set_ptrace_flags = 1;
6245 linux_stop_lwp (lwp);
6246 }
6247 else
6248 {
6249 /* Already stopped; go ahead and set the ptrace options. */
6250 struct process_info *proc = find_process_pid (pid_of (thread));
6251 int options = linux_low_ptrace_options (proc->attached);
6252
6253 linux_enable_event_reporting (lwpid_of (thread), options);
6254 lwp->must_set_ptrace_flags = 0;
6255 }
6256 });
6257 }
6258
6259 int
6260 linux_process_target::handle_monitor_command (char *mon)
6261 {
6262 #ifdef USE_THREAD_DB
6263 return thread_db_handle_monitor_command (mon);
6264 #else
6265 return 0;
6266 #endif
6267 }
6268
6269 int
6270 linux_process_target::core_of_thread (ptid_t ptid)
6271 {
6272 return linux_common_core_of_thread (ptid);
6273 }
6274
6275 bool
6276 linux_process_target::supports_disable_randomization ()
6277 {
6278 #ifdef HAVE_PERSONALITY
6279 return true;
6280 #else
6281 return false;
6282 #endif
6283 }
6284
6285 bool
6286 linux_process_target::supports_agent ()
6287 {
6288 return true;
6289 }
6290
6291 bool
6292 linux_process_target::supports_range_stepping ()
6293 {
6294 if (supports_software_single_step ())
6295 return true;
6296 if (*the_low_target.supports_range_stepping == NULL)
6297 return false;
6298
6299 return (*the_low_target.supports_range_stepping) ();
6300 }
6301
6302 bool
6303 linux_process_target::supports_pid_to_exec_file ()
6304 {
6305 return true;
6306 }
6307
6308 char *
6309 linux_process_target::pid_to_exec_file (int pid)
6310 {
6311 return linux_proc_pid_to_exec_file (pid);
6312 }
6313
6314 bool
6315 linux_process_target::supports_multifs ()
6316 {
6317 return true;
6318 }
6319
6320 int
6321 linux_process_target::multifs_open (int pid, const char *filename,
6322 int flags, mode_t mode)
6323 {
6324 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6325 }
6326
6327 int
6328 linux_process_target::multifs_unlink (int pid, const char *filename)
6329 {
6330 return linux_mntns_unlink (pid, filename);
6331 }
6332
6333 ssize_t
6334 linux_process_target::multifs_readlink (int pid, const char *filename,
6335 char *buf, size_t bufsiz)
6336 {
6337 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6338 }
6339
6340 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6341 struct target_loadseg
6342 {
6343 /* Core address to which the segment is mapped. */
6344 Elf32_Addr addr;
6345 /* VMA recorded in the program header. */
6346 Elf32_Addr p_vaddr;
6347 /* Size of this segment in memory. */
6348 Elf32_Word p_memsz;
6349 };
6350
6351 # if defined PT_GETDSBT
6352 struct target_loadmap
6353 {
6354 /* Protocol version number, must be zero. */
6355 Elf32_Word version;
6356 /* Pointer to the DSBT table, its size, and the DSBT index. */
6357 unsigned *dsbt_table;
6358 unsigned dsbt_size, dsbt_index;
6359 /* Number of segments in this map. */
6360 Elf32_Word nsegs;
6361 /* The actual memory map. */
6362 struct target_loadseg segs[/*nsegs*/];
6363 };
6364 # define LINUX_LOADMAP PT_GETDSBT
6365 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6366 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6367 # else
6368 struct target_loadmap
6369 {
6370 /* Protocol version number, must be zero. */
6371 Elf32_Half version;
6372 /* Number of segments in this map. */
6373 Elf32_Half nsegs;
6374 /* The actual memory map. */
6375 struct target_loadseg segs[/*nsegs*/];
6376 };
6377 # define LINUX_LOADMAP PTRACE_GETFDPIC
6378 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6379 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6380 # endif
6381
6382 bool
6383 linux_process_target::supports_read_loadmap ()
6384 {
6385 return true;
6386 }
6387
6388 int
6389 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6390 unsigned char *myaddr, unsigned int len)
6391 {
6392 int pid = lwpid_of (current_thread);
6393 int addr = -1;
6394 struct target_loadmap *data = NULL;
6395 unsigned int actual_length, copy_length;
6396
6397 if (strcmp (annex, "exec") == 0)
6398 addr = (int) LINUX_LOADMAP_EXEC;
6399 else if (strcmp (annex, "interp") == 0)
6400 addr = (int) LINUX_LOADMAP_INTERP;
6401 else
6402 return -1;
6403
6404 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6405 return -1;
6406
6407 if (data == NULL)
6408 return -1;
6409
6410 actual_length = sizeof (struct target_loadmap)
6411 + sizeof (struct target_loadseg) * data->nsegs;
6412
6413 if (offset < 0 || offset > actual_length)
6414 return -1;
6415
6416 copy_length = actual_length - offset < len ? actual_length - offset : len;
6417 memcpy (myaddr, (char *) data + offset, copy_length);
6418 return copy_length;
6419 }
6420 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6421
6422 void
6423 linux_process_target::process_qsupported (char **features, int count)
6424 {
6425 if (the_low_target.process_qsupported != NULL)
6426 the_low_target.process_qsupported (features, count);
6427 }
6428
6429 bool
6430 linux_process_target::supports_catch_syscall ()
6431 {
6432 return (the_low_target.get_syscall_trapinfo != NULL
6433 && linux_supports_tracesysgood ());
6434 }
6435
6436 int
6437 linux_process_target::get_ipa_tdesc_idx ()
6438 {
6439 if (the_low_target.get_ipa_tdesc_idx == NULL)
6440 return 0;
6441
6442 return (*the_low_target.get_ipa_tdesc_idx) ();
6443 }
6444
6445 bool
6446 linux_process_target::supports_tracepoints ()
6447 {
6448 if (*the_low_target.supports_tracepoints == NULL)
6449 return false;
6450
6451 return (*the_low_target.supports_tracepoints) ();
6452 }
6453
6454 CORE_ADDR
6455 linux_process_target::read_pc (regcache *regcache)
6456 {
6457 if (!low_supports_breakpoints ())
6458 return 0;
6459
6460 return low_get_pc (regcache);
6461 }
6462
6463 void
6464 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6465 {
6466 gdb_assert (low_supports_breakpoints ());
6467
6468 low_set_pc (regcache, pc);
6469 }
6470
6471 bool
6472 linux_process_target::supports_thread_stopped ()
6473 {
6474 return true;
6475 }
6476
6477 bool
6478 linux_process_target::thread_stopped (thread_info *thread)
6479 {
6480 return get_thread_lwp (thread)->stopped;
6481 }
6482
6483 /* This exposes stop-all-threads functionality to other modules. */
6484
6485 void
6486 linux_process_target::pause_all (bool freeze)
6487 {
6488 stop_all_lwps (freeze, NULL);
6489 }
6490
6491 /* This exposes unstop-all-threads functionality to other gdbserver
6492 modules. */
6493
6494 void
6495 linux_process_target::unpause_all (bool unfreeze)
6496 {
6497 unstop_all_lwps (unfreeze, NULL);
6498 }
6499
6500 int
6501 linux_process_target::prepare_to_access_memory ()
6502 {
6503 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6504 running LWP. */
6505 if (non_stop)
6506 target_pause_all (true);
6507 return 0;
6508 }
6509
6510 void
6511 linux_process_target::done_accessing_memory ()
6512 {
6513 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6514 running LWP. */
6515 if (non_stop)
6516 target_unpause_all (true);
6517 }
6518
6519 bool
6520 linux_process_target::supports_fast_tracepoints ()
6521 {
6522 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6523 }
6524
6525 int
6526 linux_process_target::install_fast_tracepoint_jump_pad
6527 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6528 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6529 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6530 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6531 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6532 char *err)
6533 {
6534 return (*the_low_target.install_fast_tracepoint_jump_pad)
6535 (tpoint, tpaddr, collector, lockaddr, orig_size,
6536 jump_entry, trampoline, trampoline_size,
6537 jjump_pad_insn, jjump_pad_insn_size,
6538 adjusted_insn_addr, adjusted_insn_addr_end,
6539 err);
6540 }
6541
6542 emit_ops *
6543 linux_process_target::emit_ops ()
6544 {
6545 if (the_low_target.emit_ops != NULL)
6546 return (*the_low_target.emit_ops) ();
6547 else
6548 return NULL;
6549 }
6550
6551 int
6552 linux_process_target::get_min_fast_tracepoint_insn_len ()
6553 {
6554 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6555 }
6556
6557 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6558
6559 static int
6560 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6561 CORE_ADDR *phdr_memaddr, int *num_phdr)
6562 {
6563 char filename[PATH_MAX];
6564 int fd;
6565 const int auxv_size = is_elf64
6566 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6567 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6568
6569 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6570
6571 fd = open (filename, O_RDONLY);
6572 if (fd < 0)
6573 return 1;
6574
6575 *phdr_memaddr = 0;
6576 *num_phdr = 0;
6577 while (read (fd, buf, auxv_size) == auxv_size
6578 && (*phdr_memaddr == 0 || *num_phdr == 0))
6579 {
6580 if (is_elf64)
6581 {
6582 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6583
6584 switch (aux->a_type)
6585 {
6586 case AT_PHDR:
6587 *phdr_memaddr = aux->a_un.a_val;
6588 break;
6589 case AT_PHNUM:
6590 *num_phdr = aux->a_un.a_val;
6591 break;
6592 }
6593 }
6594 else
6595 {
6596 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6597
6598 switch (aux->a_type)
6599 {
6600 case AT_PHDR:
6601 *phdr_memaddr = aux->a_un.a_val;
6602 break;
6603 case AT_PHNUM:
6604 *num_phdr = aux->a_un.a_val;
6605 break;
6606 }
6607 }
6608 }
6609
6610 close (fd);
6611
6612 if (*phdr_memaddr == 0 || *num_phdr == 0)
6613 {
6614 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6615 "phdr_memaddr = %ld, phdr_num = %d",
6616 (long) *phdr_memaddr, *num_phdr);
6617 return 2;
6618 }
6619
6620 return 0;
6621 }
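/* /proc/PID/auxv is an array of (a_type, a_un.a_val) records
   terminated by an AT_NULL entry.  For a 64-bit inferior each record
   is a 16-byte Elf64_auxv_t, as declared in <elf.h>:

     typedef struct
     {
       uint64_t a_type;              // AT_PHDR, AT_PHNUM, AT_NULL, ...
       union
       {
         uint64_t a_val;
       } a_un;
     } Elf64_auxv_t;

   which is why the loop above reads auxv_size bytes at a time and
   switches on a_type.  */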
6622
6623 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6624
6625 static CORE_ADDR
6626 get_dynamic (const int pid, const int is_elf64)
6627 {
6628 CORE_ADDR phdr_memaddr, relocation;
6629 int num_phdr, i;
6630 unsigned char *phdr_buf;
6631 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6632
6633 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6634 return 0;
6635
6636 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6637 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6638
6639 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6640 return 0;
6641
6642 /* Compute relocation: it is expected to be 0 for "regular" executables,
6643 non-zero for PIE ones. */
6644 relocation = -1;
6645 for (i = 0; relocation == -1 && i < num_phdr; i++)
6646 if (is_elf64)
6647 {
6648 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6649
6650 if (p->p_type == PT_PHDR)
6651 relocation = phdr_memaddr - p->p_vaddr;
6652 }
6653 else
6654 {
6655 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6656
6657 if (p->p_type == PT_PHDR)
6658 relocation = phdr_memaddr - p->p_vaddr;
6659 }
6660
6661 if (relocation == -1)
6662 {
6663 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6664 all real world executables, including PIE executables, always have
6665 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6666 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6667 provides DT_DEBUG anyway (fpc binaries are statically linked).
6668
6669 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6670
6671 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6672
6673 return 0;
6674 }
6675
6676 for (i = 0; i < num_phdr; i++)
6677 {
6678 if (is_elf64)
6679 {
6680 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6681
6682 if (p->p_type == PT_DYNAMIC)
6683 return p->p_vaddr + relocation;
6684 }
6685 else
6686 {
6687 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6688
6689 if (p->p_type == PT_DYNAMIC)
6690 return p->p_vaddr + relocation;
6691 }
6692 }
6693
6694 return 0;
6695 }
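/* A worked PIE example for the relocation computed above
   (illustrative numbers): if the program headers record
   PT_PHDR.p_vaddr == 0x40 but AT_PHDR reports them mapped at
   0x555555554040, then

     relocation = 0x555555554040 - 0x40 = 0x555555554000

   and a PT_DYNAMIC with p_vaddr == 0x2e00 yields
   &_DYNAMIC == 0x555555556e00.  For a non-PIE executable the two
   addresses coincide and the relocation is 0.  */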
6696
6697 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6698 can be 0 if the inferior does not yet have the library list initialized.
6699 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6700 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6701
6702 static CORE_ADDR
6703 get_r_debug (const int pid, const int is_elf64)
6704 {
6705 CORE_ADDR dynamic_memaddr;
6706 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6707 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6708 CORE_ADDR map = -1;
6709
6710 dynamic_memaddr = get_dynamic (pid, is_elf64);
6711 if (dynamic_memaddr == 0)
6712 return map;
6713
6714 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6715 {
6716 if (is_elf64)
6717 {
6718 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6719 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6720 union
6721 {
6722 Elf64_Xword map;
6723 unsigned char buf[sizeof (Elf64_Xword)];
6724 }
6725 rld_map;
6726 #endif
6727 #ifdef DT_MIPS_RLD_MAP
6728 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6729 {
6730 if (linux_read_memory (dyn->d_un.d_val,
6731 rld_map.buf, sizeof (rld_map.buf)) == 0)
6732 return rld_map.map;
6733 else
6734 break;
6735 }
6736 #endif /* DT_MIPS_RLD_MAP */
6737 #ifdef DT_MIPS_RLD_MAP_REL
6738 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6739 {
6740 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6741 rld_map.buf, sizeof (rld_map.buf)) == 0)
6742 return rld_map.map;
6743 else
6744 break;
6745 }
6746 #endif /* DT_MIPS_RLD_MAP_REL */
6747
6748 if (dyn->d_tag == DT_DEBUG && map == -1)
6749 map = dyn->d_un.d_val;
6750
6751 if (dyn->d_tag == DT_NULL)
6752 break;
6753 }
6754 else
6755 {
6756 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6757 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6758 union
6759 {
6760 Elf32_Word map;
6761 unsigned char buf[sizeof (Elf32_Word)];
6762 }
6763 rld_map;
6764 #endif
6765 #ifdef DT_MIPS_RLD_MAP
6766 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6767 {
6768 if (linux_read_memory (dyn->d_un.d_val,
6769 rld_map.buf, sizeof (rld_map.buf)) == 0)
6770 return rld_map.map;
6771 else
6772 break;
6773 }
6774 #endif /* DT_MIPS_RLD_MAP */
6775 #ifdef DT_MIPS_RLD_MAP_REL
6776 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6777 {
6778 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6779 rld_map.buf, sizeof (rld_map.buf)) == 0)
6780 return rld_map.map;
6781 else
6782 break;
6783 }
6784 #endif /* DT_MIPS_RLD_MAP_REL */
6785
6786 if (dyn->d_tag == DT_DEBUG && map == -1)
6787 map = dyn->d_un.d_val;
6788
6789 if (dyn->d_tag == DT_NULL)
6790 break;
6791 }
6792
6793 dynamic_memaddr += dyn_size;
6794 }
6795
6796 return map;
6797 }
6798
6799 /* Read one pointer from MEMADDR in the inferior. */
6800
6801 static int
6802 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6803 {
6804 int ret;
6805
6806 /* Go through a union so this works on either big or little endian
6807 hosts, when the inferior's pointer size is smaller than the size
6808 of CORE_ADDR. It is assumed the inferior's endianness is the
6809 same as the superior's. */
6810 union
6811 {
6812 CORE_ADDR core_addr;
6813 unsigned int ui;
6814 unsigned char uc;
6815 } addr;
6816
6817 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6818 if (ret == 0)
6819 {
6820 if (ptr_size == sizeof (CORE_ADDR))
6821 *ptr = addr.core_addr;
6822 else if (ptr_size == sizeof (unsigned int))
6823 *ptr = addr.ui;
6824 else
6825 gdb_assert_not_reached ("unhandled pointer size");
6826 }
6827 return ret;
6828 }
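/* Why the union is needed: reading ptr_size bytes into addr.uc
   places them at the start of the union.  Had the bytes been read
   straight into an 8-byte CORE_ADDR while the inferior uses 4-byte
   pointers, a big-endian host would find them in the high-order half
   of the value.  Selecting addr.ui instead overlays exactly the four
   bytes that were read, so interpreting them in host byte order
   yields the inferior's value - given the stated assumption that the
   two byte orders agree.  */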
6829
6830 bool
6831 linux_process_target::supports_qxfer_libraries_svr4 ()
6832 {
6833 return true;
6834 }
6835
6836 struct link_map_offsets
6837 {
6838 /* Offset and size of r_debug.r_version. */
6839 int r_version_offset;
6840
6841 /* Offset and size of r_debug.r_map. */
6842 int r_map_offset;
6843
6844 /* Offset to l_addr field in struct link_map. */
6845 int l_addr_offset;
6846
6847 /* Offset to l_name field in struct link_map. */
6848 int l_name_offset;
6849
6850 /* Offset to l_ld field in struct link_map. */
6851 int l_ld_offset;
6852
6853 /* Offset to l_next field in struct link_map. */
6854 int l_next_offset;
6855
6856 /* Offset to l_prev field in struct link_map. */
6857 int l_prev_offset;
6858 };
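/* For reference, these offsets mirror glibc's <link.h> layouts.  A
   simplified sketch of the structures being walked (only field order
   matters here):

     struct r_debug
     {
       int r_version;                // offset 0
       struct link_map *r_map;       // offset 4 (ILP32) or 8 (LP64)
       ...
     };

     struct link_map
     {
       ElfW(Addr) l_addr;            // load bias
       char *l_name;                 // absolute file name
       ElfW(Dyn) *l_ld;              // dynamic section
       struct link_map *l_next, *l_prev;
     };
  */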
6859
6860 /* Construct qXfer:libraries-svr4:read reply. */
6861
6862 int
6863 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6864 unsigned char *readbuf,
6865 unsigned const char *writebuf,
6866 CORE_ADDR offset, int len)
6867 {
6868 struct process_info_private *const priv = current_process ()->priv;
6869 char filename[PATH_MAX];
6870 int pid, is_elf64;
6871
6872 static const struct link_map_offsets lmo_32bit_offsets =
6873 {
6874 0, /* r_version offset. */
6875 4, /* r_debug.r_map offset. */
6876 0, /* l_addr offset in link_map. */
6877 4, /* l_name offset in link_map. */
6878 8, /* l_ld offset in link_map. */
6879 12, /* l_next offset in link_map. */
6880 16 /* l_prev offset in link_map. */
6881 };
6882
6883 static const struct link_map_offsets lmo_64bit_offsets =
6884 {
6885 0, /* r_version offset. */
6886 8, /* r_debug.r_map offset. */
6887 0, /* l_addr offset in link_map. */
6888 8, /* l_name offset in link_map. */
6889 16, /* l_ld offset in link_map. */
6890 24, /* l_next offset in link_map. */
6891 32 /* l_prev offset in link_map. */
6892 };
6893 const struct link_map_offsets *lmo;
6894 unsigned int machine;
6895 int ptr_size;
6896 CORE_ADDR lm_addr = 0, lm_prev = 0;
6897 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6898 int header_done = 0;
6899
6900 if (writebuf != NULL)
6901 return -2;
6902 if (readbuf == NULL)
6903 return -1;
6904
6905 pid = lwpid_of (current_thread);
6906 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6907 is_elf64 = elf_64_file_p (filename, &machine);
6908 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6909 ptr_size = is_elf64 ? 8 : 4;
6910
6911 while (annex[0] != '\0')
6912 {
6913 const char *sep;
6914 CORE_ADDR *addrp;
6915 int name_len;
6916
6917 sep = strchr (annex, '=');
6918 if (sep == NULL)
6919 break;
6920
6921 name_len = sep - annex;
6922 if (name_len == 5 && startswith (annex, "start"))
6923 addrp = &lm_addr;
6924 else if (name_len == 4 && startswith (annex, "prev"))
6925 addrp = &lm_prev;
6926 else
6927 {
6928 annex = strchr (sep, ';');
6929 if (annex == NULL)
6930 break;
6931 annex++;
6932 continue;
6933 }
6934
6935 annex = decode_address_to_semicolon (addrp, sep + 1);
6936 }
6937
6938 if (lm_addr == 0)
6939 {
6940 int r_version = 0;
6941
6942 if (priv->r_debug == 0)
6943 priv->r_debug = get_r_debug (pid, is_elf64);
6944
6945 /* We failed to find DT_DEBUG. This situation will not change
6946 for this inferior, so do not retry. Report it to GDB as
6947 E01; see GDB's solib-svr4.c for the reasons. */
6948 if (priv->r_debug == (CORE_ADDR) -1)
6949 return -1;
6950
6951 if (priv->r_debug != 0)
6952 {
6953 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6954 (unsigned char *) &r_version,
6955 sizeof (r_version)) != 0
6956 || r_version != 1)
6957 {
6958 warning ("unexpected r_debug version %d", r_version);
6959 }
6960 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6961 &lm_addr, ptr_size) != 0)
6962 {
6963 warning ("unable to read r_map from 0x%lx",
6964 (long) priv->r_debug + lmo->r_map_offset);
6965 }
6966 }
6967 }
6968
6969 std::string document = "<library-list-svr4 version=\"1.0\"";
6970
6971 while (lm_addr
6972 && read_one_ptr (lm_addr + lmo->l_name_offset,
6973 &l_name, ptr_size) == 0
6974 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6975 &l_addr, ptr_size) == 0
6976 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6977 &l_ld, ptr_size) == 0
6978 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6979 &l_prev, ptr_size) == 0
6980 && read_one_ptr (lm_addr + lmo->l_next_offset,
6981 &l_next, ptr_size) == 0)
6982 {
6983 unsigned char libname[PATH_MAX];
6984
6985 if (lm_prev != l_prev)
6986 {
6987 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6988 (long) lm_prev, (long) l_prev);
6989 break;
6990 }
6991
6992 /* Ignore the first entry even if it has a valid name, as the first
6993 entry corresponds to the main executable. The first entry should
6994 not be skipped if the dynamic loader was loaded late by a static
6995 executable (see solib-svr4.c parameter ignore_first). But in that
6996 case the main executable does not have PT_DYNAMIC present, and this
6997 function has already exited above due to a failed get_r_debug. */
6998 if (lm_prev == 0)
6999 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7000 else
7001 {
7002 /* Not checking for error because reading may stop before
7003 we've got PATH_MAX worth of characters. */
7004 libname[0] = '\0';
7005 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7006 libname[sizeof (libname) - 1] = '\0';
7007 if (libname[0] != '\0')
7008 {
7009 if (!header_done)
7010 {
7011 /* Terminate `<library-list-svr4'. */
7012 document += '>';
7013 header_done = 1;
7014 }
7015
7016 string_appendf (document, "<library name=\"");
7017 xml_escape_text_append (&document, (char *) libname);
7018 string_appendf (document, "\" lm=\"0x%lx\" "
7019 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7020 (unsigned long) lm_addr, (unsigned long) l_addr,
7021 (unsigned long) l_ld);
7022 }
7023 }
7024
7025 lm_prev = lm_addr;
7026 lm_addr = l_next;
7027 }
7028
7029 if (!header_done)
7030 {
7031 /* Empty list; terminate `<library-list-svr4'. */
7032 document += "/>";
7033 }
7034 else
7035 document += "</library-list-svr4>";
7036
7037 int document_len = document.length ();
7038 if (offset < document_len)
7039 document_len -= offset;
7040 else
7041 document_len = 0;
7042 if (len > document_len)
7043 len = document_len;
7044
7045 memcpy (readbuf, document.data () + offset, len);
7046
7047 return len;
7048 }
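/* The reply built above looks like this for a hypothetical inferior
   with one shared library (addresses are made up):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
              l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"/>
     </library-list-svr4>

   except that the actual document is emitted without the line breaks
   shown here.  */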
7049
7050 #ifdef HAVE_LINUX_BTRACE
7051
7052 btrace_target_info *
7053 linux_process_target::enable_btrace (ptid_t ptid,
7054 const btrace_config *conf)
7055 {
7056 return linux_enable_btrace (ptid, conf);
7057 }
7058
7059 /* See to_disable_btrace target method. */
7060
7061 int
7062 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7063 {
7064 enum btrace_error err;
7065
7066 err = linux_disable_btrace (tinfo);
7067 return (err == BTRACE_ERR_NONE ? 0 : -1);
7068 }
7069
7070 /* Encode an Intel Processor Trace configuration. */
7071
7072 static void
7073 linux_low_encode_pt_config (struct buffer *buffer,
7074 const struct btrace_data_pt_config *config)
7075 {
7076 buffer_grow_str (buffer, "<pt-config>\n");
7077
7078 switch (config->cpu.vendor)
7079 {
7080 case CV_INTEL:
7081 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7082 "model=\"%u\" stepping=\"%u\"/>\n",
7083 config->cpu.family, config->cpu.model,
7084 config->cpu.stepping);
7085 break;
7086
7087 default:
7088 break;
7089 }
7090
7091 buffer_grow_str (buffer, "</pt-config>\n");
7092 }
7093
7094 /* Encode a raw buffer. */
7095
7096 static void
7097 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7098 unsigned int size)
7099 {
7100 if (size == 0)
7101 return;
7102
7103 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7104 buffer_grow_str (buffer, "<raw>\n");
7105
7106 while (size-- > 0)
7107 {
7108 char elem[2];
7109
7110 elem[0] = tohex ((*data >> 4) & 0xf);
7111 elem[1] = tohex (*data++ & 0xf);
7112
7113 buffer_grow (buffer, elem, 2);
7114 }
7115
7116 buffer_grow_str (buffer, "</raw>\n");
7117 }
7118
7119 /* See to_read_btrace target method. */
7120
7121 int
7122 linux_process_target::read_btrace (btrace_target_info *tinfo,
7123 buffer *buffer,
7124 enum btrace_read_type type)
7125 {
7126 struct btrace_data btrace;
7127 enum btrace_error err;
7128
7129 err = linux_read_btrace (&btrace, tinfo, type);
7130 if (err != BTRACE_ERR_NONE)
7131 {
7132 if (err == BTRACE_ERR_OVERFLOW)
7133 buffer_grow_str0 (buffer, "E.Overflow.");
7134 else
7135 buffer_grow_str0 (buffer, "E.Generic Error.");
7136
7137 return -1;
7138 }
7139
7140 switch (btrace.format)
7141 {
7142 case BTRACE_FORMAT_NONE:
7143 buffer_grow_str0 (buffer, "E.No Trace.");
7144 return -1;
7145
7146 case BTRACE_FORMAT_BTS:
7147 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7148 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7149
7150 for (const btrace_block &block : *btrace.variant.bts.blocks)
7151 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7152 paddress (block.begin), paddress (block.end));
7153
7154 buffer_grow_str0 (buffer, "</btrace>\n");
7155 break;
7156
7157 case BTRACE_FORMAT_PT:
7158 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7159 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7160 buffer_grow_str (buffer, "<pt>\n");
7161
7162 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7163
7164 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7165 btrace.variant.pt.size);
7166
7167 buffer_grow_str (buffer, "</pt>\n");
7168 buffer_grow_str0 (buffer, "</btrace>\n");
7169 break;
7170
7171 default:
7172 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7173 return -1;
7174 }
7175
7176 return 0;
7177 }
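/* For the BTS case the document built above looks like this
   (hypothetical addresses):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x401130" end="0x401156"/>
     <block begin="0x401000" end="0x401021"/>
     </btrace>

   with one <block> element per run of sequentially executed
   instructions.  */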
7178
7179 /* See to_btrace_conf target method. */
7180
7181 int
7182 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7183 buffer *buffer)
7184 {
7185 const struct btrace_config *conf;
7186
7187 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7188 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7189
7190 conf = linux_btrace_conf (tinfo);
7191 if (conf != NULL)
7192 {
7193 switch (conf->format)
7194 {
7195 case BTRACE_FORMAT_NONE:
7196 break;
7197
7198 case BTRACE_FORMAT_BTS:
7199 buffer_xml_printf (buffer, "<bts");
7200 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7201 buffer_xml_printf (buffer, " />\n");
7202 break;
7203
7204 case BTRACE_FORMAT_PT:
7205 buffer_xml_printf (buffer, "<pt");
7206 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7207 buffer_xml_printf (buffer, "/>\n");
7208 break;
7209 }
7210 }
7211
7212 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7213 return 0;
7214 }
7215 #endif /* HAVE_LINUX_BTRACE */
7216
7217 /* See nat/linux-nat.h. */
7218
7219 ptid_t
7220 current_lwp_ptid (void)
7221 {
7222 return ptid_of (current_thread);
7223 }
7224
7225 const char *
7226 linux_process_target::thread_name (ptid_t thread)
7227 {
7228 return linux_proc_tid_get_name (thread);
7229 }
7230
7231 #if USE_THREAD_DB
7232 bool
7233 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7234 int *handle_len)
7235 {
7236 return thread_db_thread_handle (ptid, handle, handle_len);
7237 }
7238 #endif
7239
7240 /* Default implementation of linux_target_ops method "set_pc" for
7241 a 32-bit pc register that is literally named "pc". */
7242
7243 void
7244 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7245 {
7246 uint32_t newpc = pc;
7247
7248 supply_register_by_name (regcache, "pc", &newpc);
7249 }
7250
7251 /* Default implementation of linux_target_ops method "get_pc" for
7252 a 32-bit pc register that is literally named "pc". */
7253
7254 CORE_ADDR
7255 linux_get_pc_32bit (struct regcache *regcache)
7256 {
7257 uint32_t pc;
7258
7259 collect_register_by_name (regcache, "pc", &pc);
7260 if (debug_threads)
7261 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7262 return pc;
7263 }
7264
7265 /* Default implementation of linux_target_ops method "set_pc" for
7266 a 64-bit pc register that is literally named "pc". */
7267
7268 void
7269 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7270 {
7271 uint64_t newpc = pc;
7272
7273 supply_register_by_name (regcache, "pc", &newpc);
7274 }
7275
7276 /* Default implementation of linux_target_ops method "get_pc" for
7277 a 64-bit pc register that is literally named "pc". */
7278
7279 CORE_ADDR
7280 linux_get_pc_64bit (struct regcache *regcache)
7281 {
7282 uint64_t pc;
7283
7284 collect_register_by_name (regcache, "pc", &pc);
7285 if (debug_threads)
7286 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7287 return pc;
7288 }
7289
7290 /* See linux-low.h. */
7291
7292 int
7293 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7294 {
7295 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7296 int offset = 0;
7297
7298 gdb_assert (wordsize == 4 || wordsize == 8);
7299
7300 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7301 {
7302 if (wordsize == 4)
7303 {
7304 uint32_t *data_p = (uint32_t *) data;
7305 if (data_p[0] == match)
7306 {
7307 *valp = data_p[1];
7308 return 1;
7309 }
7310 }
7311 else
7312 {
7313 uint64_t *data_p = (uint64_t *) data;
7314 if (data_p[0] == match)
7315 {
7316 *valp = data_p[1];
7317 return 1;
7318 }
7319 }
7320
7321 offset += 2 * wordsize;
7322 }
7323
7324 return 0;
7325 }
7326
7327 /* See linux-low.h. */
7328
7329 CORE_ADDR
7330 linux_get_hwcap (int wordsize)
7331 {
7332 CORE_ADDR hwcap = 0;
7333 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7334 return hwcap;
7335 }
7336
7337 /* See linux-low.h. */
7338
7339 CORE_ADDR
7340 linux_get_hwcap2 (int wordsize)
7341 {
7342 CORE_ADDR hwcap2 = 0;
7343 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7344 return hwcap2;
7345 }
7346
7347 #ifdef HAVE_LINUX_REGSETS
7348 void
7349 initialize_regsets_info (struct regsets_info *info)
7350 {
7351 for (info->num_regsets = 0;
7352 info->regsets[info->num_regsets].size >= 0;
7353 info->num_regsets++)
7354 ;
7355 }
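/* The loop above relies on a sentinel: every architecture's regsets
   table ends with an entry whose size field is negative, so
   num_regsets counts everything before it.  A hypothetical table
   (the my_* names are made up for illustration):

     static struct regset_info my_regsets[] =
     {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, my_fill_gregset, my_store_gregset },
       NULL_REGSET   // size < 0 terminates the table
     };
  */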
7356 #endif
7357
7358 void
7359 initialize_low (void)
7360 {
7361 struct sigaction sigchld_action;
7362
7363 memset (&sigchld_action, 0, sizeof (sigchld_action));
7364 set_target_ops (the_linux_target);
7365
7366 linux_ptrace_init_warnings ();
7367 linux_proc_init_warnings ();
7368
7369 sigchld_action.sa_handler = sigchld_handler;
7370 sigemptyset (&sigchld_action.sa_mask);
7371 sigchld_action.sa_flags = SA_RESTART;
7372 sigaction (SIGCHLD, &sigchld_action, NULL);
7373
7374 initialize_low_arch ();
7375
7376 linux_check_ptrace_features ();
7377 }