gdb: resume ongoing step after handling fork or vfork
gdbserver/linux-low.cc
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;  /* Entry type */
  union
    {
      uint32_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;  /* Entry type */
  union
    {
      uint64_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
{
  /* Not stopping threads presently.  */
  NOT_STOPPING_THREADS,

  /* Stopping threads.  */
  STOPPING_THREADS,

  /* Stopping and suspending threads.  */
  STOPPING_AND_SUSPENDING_THREADS
};

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
                          "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF header at all.  Set
   *MACHINE to the header's e_machine value, or to EM_NONE if it is
   not an ELF header.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is not, and -1 if
   the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

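/* Delete LWP: remove its thread from the thread list, free any
   architecture-specific data, and free the lwp itself.  */
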
void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

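/* Add a process entry for PID to the process list, allocating its
   gdbserver-private and architecture-private data.  ATTACHED is
   non-zero if we attached to an already running process rather than
   spawning it ourselves.  */
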
process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

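/* Call the low target's architecture-setup hook with THREAD
   temporarily selected as the current thread.  */
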
void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

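/* Handle an extended wait status (fork, vfork, clone, exec or
   vfork-done event) for EVENT_LWP.  Return 0 if the event should be
   reported to GDB, or 1 if it was handled internally and should be
   suppressed.  */
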
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;

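          /* A fork/vfork child starts out as the leader of its own
             new thread group, so its pid and lwpid are one and the
             same.  */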
          ptid = ptid_t (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = add_linux_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, the child
                 will hit them, so uninsert single-step breakpoints
                 from the parent (and child).  Once the vfork child is
                 done, reinsert them back in the parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          target_desc_up tdesc = allocate_target_description ();
          copy_target_description (tdesc.get (), parent_proc->tdesc);
          child_proc->tdesc = tdesc.release ();

          /* Clone arch-specific process data.  */
          low_new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once the vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

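      /* A clone child stays in the parent's thread group, so it
         keeps the parent's pid and only gets a fresh lwpid.  */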
      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

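/* Return LWP's current PC, or zero if the target does not support
   breakpoints.  */
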
CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

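/* Store in *SYSNO the number of the syscall that LWP is stopped
   at.  */
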
void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

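/* Determine why LWP stopped (software or hardware breakpoint,
   watchpoint, or single-step), record it in LWP->stop_reason and
   LWP->stop_pc, and back the PC up over a software breakpoint if
   necessary.  Return false if the target does not support
   breakpoints.  */
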
bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          low_set_pc (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}

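/* Create an lwp_info for PTID, add a thread for it, and let the low
   target initialize any per-thread data.  */
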
lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}

/* Start an inferior process and return its pid.  PROGRAM is the name
   of the program to be started, and PROGRAM_ARGS are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

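/* Attach to the LWP specified by PTID.  Return 0 on success, or an
   errno value if the initial PTRACE_ATTACH fails.  */
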
int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, safe_strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

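/* Return true if the process with id PID has at most one thread
   left, i.e. the thread we are looking at is the last one.  */
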
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

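/* Kill process PROCESS: stop all of its lwps, kill each clone lwp,
   then kill and reap the thread group leader last.  */
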
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

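/* Detach from LWP, passing on to it any signal it had pending, and
   reaping the lwp if it turns out to have already become zombie.  */
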
void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 safe_strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets
     real messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
         If the thread group exits, we must reap the zombie clone lwps
         before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
        return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

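/* Wait for process PID to exit, collecting its final wait status.  */
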
void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

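/* Return true if THREAD's pending status is still worth reporting to
   the client.  A pending breakpoint stop is discarded if the PC has
   since moved or the breakpoint that caused it is gone.  */
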
bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !low_breakpoint_at (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

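/* Callback for find_thread.  Return true if THREAD matches PTID and
   has a pending status that is still worth reporting; a thread whose
   pending status has become stale is re-resumed here instead.  */
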
bool
linux_process_target::status_pending_p_callback (thread_info *thread,
                                                 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

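/* Find the lwp whose lwpid matches PTID's lwp field, or its pid
   field if the lwp field is zero.  Return NULL if none is found.  */
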
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                    "num_lwps=%d, zombie=%d\n",
                    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
        /* Check if there are other threads in the group, as we may
           have raced with the inferior simply exiting.  */
        && !last_thread_of_process_p (leader_pid)
        && linux_proc_pid_is_zombie (leader_pid))
      {
        /* A leader zombie can mean one of two things:

           - It exited, and there's an exit status pending
             available, or only the leader exited (not the whole
             program).  In the latter case, we can't waitpid the
             leader's exit status until all other threads are gone.

           - There are 3 or more threads in the group, and a thread
             other than the leader exec'd.  On an exec, the Linux
             kernel destroys all other threads (except the execing
             one) in the thread group, and resets the execing thread's
             tid to the tgid.  No exit notification is sent for the
             execing thread -- from the ptracer's perspective, it
             appears as though the execing thread just vanishes.
             Until we reap all other threads except the leader and the
             execing thread, the leader will be zombie, and the
             execing thread will be in `D (disc sleep)'.  As soon as
             all other threads are reaped, the execing thread changes
             its tid to the tgid, and the previous (zombie) leader
             vanishes, giving place to the "new" leader.  We could try
             distinguishing the exit and exec cases, by waiting once
             more, and seeing if something comes out, but it doesn't
             sound useful.  The previous leader _does_ go away, and
             we'll re-add the new one once we see the exec event
             (which is just the same as what would happen if the
             previous leader did exit voluntarily before some other
             thread execs).  */

        if (debug_threads)
          debug_printf ("CZL: Thread group leader %d zombie "
                        "(it exited, or another thread execd).\n",
                        leader_pid);

        delete_lwp (leader_lp);
      }
  });
}

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
                    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
                      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
                      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
              || (lwp->collecting_fast_tracepoint
                  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}

1973 bool
1974 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1975 {
1976 struct thread_info *saved_thread;
1977
1978 saved_thread = current_thread;
1979 current_thread = get_lwp_thread (lwp);
1980
1981 if ((wstat == NULL
1982 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1983 && supports_fast_tracepoints ()
1984 && agent_loaded_p ())
1985 {
1986 struct fast_tpoint_collect_status status;
1987
1988 if (debug_threads)
1989 debug_printf ("Checking whether LWP %ld needs to move out of the "
1990 "jump pad.\n",
1991 lwpid_of (current_thread));
1992
1993 fast_tpoint_collect_result r
1994 = linux_fast_tracepoint_collecting (lwp, &status);
1995
1996 if (wstat == NULL
1997 || (WSTOPSIG (*wstat) != SIGILL
1998 && WSTOPSIG (*wstat) != SIGFPE
1999 && WSTOPSIG (*wstat) != SIGSEGV
2000 && WSTOPSIG (*wstat) != SIGBUS))
2001 {
2002 lwp->collecting_fast_tracepoint = r;
2003
2004 if (r != fast_tpoint_collect_result::not_collecting)
2005 {
2006 if (r == fast_tpoint_collect_result::before_insn
2007 && lwp->exit_jump_pad_bkpt == NULL)
2008 {
2009 /* Haven't executed the original instruction yet.
2010 Set breakpoint there, and wait till it's hit,
2011 then single-step until exiting the jump pad. */
2012 lwp->exit_jump_pad_bkpt
2013 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2014 }
2015
2016 if (debug_threads)
2017 debug_printf ("Checking whether LWP %ld needs to move out of "
2018 "the jump pad...it does\n",
2019 lwpid_of (current_thread));
2020 current_thread = saved_thread;
2021
2022 return true;
2023 }
2024 }
2025 else
2026 {
2027 /* If we get a synchronous signal while collecting, *and*
2028 while executing the (relocated) original instruction,
2029 reset the PC to point at the tpoint address, before
2030 reporting to GDB. Otherwise, it's an IPA lib bug: just
2031 report the signal to GDB, and pray for the best. */
2032
2033 lwp->collecting_fast_tracepoint
2034 = fast_tpoint_collect_result::not_collecting;
2035
2036 if (r != fast_tpoint_collect_result::not_collecting
2037 && (status.adjusted_insn_addr <= lwp->stop_pc
2038 && lwp->stop_pc < status.adjusted_insn_addr_end))
2039 {
2040 siginfo_t info;
2041 struct regcache *regcache;
2042
2043 /* The si_addr on a few signals references the address
2044 of the faulting instruction. Adjust that as
2045 well. */
2046 if ((WSTOPSIG (*wstat) == SIGILL
2047 || WSTOPSIG (*wstat) == SIGFPE
2048 || WSTOPSIG (*wstat) == SIGBUS
2049 || WSTOPSIG (*wstat) == SIGSEGV)
2050 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2051 (PTRACE_TYPE_ARG3) 0, &info) == 0
2052 /* Final check just to make sure we don't clobber
2053 the siginfo of non-kernel-sent signals. */
2054 && (uintptr_t) info.si_addr == lwp->stop_pc)
2055 {
2056 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2057 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2058 (PTRACE_TYPE_ARG3) 0, &info);
2059 }
2060
2061 regcache = get_thread_regcache (current_thread, 1);
2062 low_set_pc (regcache, status.tpoint_addr);
2063 lwp->stop_pc = status.tpoint_addr;
2064
2065 /* Cancel any fast tracepoint lock this thread was
2066 holding. */
2067 force_unlock_trace_buffer ();
2068 }
2069
2070 if (lwp->exit_jump_pad_bkpt != NULL)
2071 {
2072 if (debug_threads)
2073 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2074 "stopping all threads momentarily.\n");
2075
2076 stop_all_lwps (1, lwp);
2077
2078 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2079 lwp->exit_jump_pad_bkpt = NULL;
2080
2081 unstop_all_lwps (1, lwp);
2082
2083 gdb_assert (lwp->suspended >= 0);
2084 }
2085 }
2086 }
2087
2088 if (debug_threads)
2089 debug_printf ("Checking whether LWP %ld needs to move out of the "
2090 "jump pad...no\n",
2091 lwpid_of (current_thread));
2092
2093 current_thread = saved_thread;
2094 return false;
2095 }
2096
2097 /* Enqueue one signal in the "signals to report later when out of the
2098 jump pad" list. */
2099
2100 static void
2101 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2102 {
2103 struct thread_info *thread = get_lwp_thread (lwp);
2104
2105 if (debug_threads)
2106 debug_printf ("Deferring signal %d for LWP %ld.\n",
2107 WSTOPSIG (*wstat), lwpid_of (thread));
2108
2109 if (debug_threads)
2110 {
2111 for (const auto &sig : lwp->pending_signals_to_report)
2112 debug_printf (" Already queued %d\n",
2113 sig.signal);
2114
2115 debug_printf (" (no more currently queued signals)\n");
2116 }
2117
2118 /* Don't enqueue non-RT signals if they are already in the deferred
2119 queue. (SIGSTOP being the easiest signal to see ending up here
2120 twice.) */
2121 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2122 {
2123 for (const auto &sig : lwp->pending_signals_to_report)
2124 {
2125 if (sig.signal == WSTOPSIG (*wstat))
2126 {
2127 if (debug_threads)
2128 debug_printf ("Not requeuing already queued non-RT signal %d"
2129 " for LWP %ld\n",
2130 sig.signal,
2131 lwpid_of (thread));
2132 return;
2133 }
2134 }
2135 }
2136
2137 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2138
2139 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2140 &lwp->pending_signals_to_report.back ().info);
2141 }
2142
2143 /* Dequeue one signal from the "signals to report later when out of
2144 the jump pad" list. */
2145
2146 static int
2147 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2148 {
2149 struct thread_info *thread = get_lwp_thread (lwp);
2150
2151 if (!lwp->pending_signals_to_report.empty ())
2152 {
2153 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2154
2155 *wstat = W_STOPCODE (p_sig.signal);
2156 if (p_sig.info.si_signo != 0)
2157 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2158 &p_sig.info);
2159
2160 lwp->pending_signals_to_report.pop_front ();
2161
2162 if (debug_threads)
2163 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2164 WSTOPSIG (*wstat), lwpid_of (thread));
2165
2166 if (debug_threads)
2167 {
2168 for (const auto &sig : lwp->pending_signals_to_report)
2169 debug_printf (" Still queued %d\n",
2170 sig.signal);
2171
2172 debug_printf (" (no more queued signals)\n");
2173 }
2174
2175 return 1;
2176 }
2177
2178 return 0;
2179 }
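
/* The dequeue above rebuilds a raw wait status with W_STOPCODE, the
   inverse of the WIFSTOPPED/WSTOPSIG decoding used throughout this
   file.  A tiny self-contained sketch of the round trip, with SIGUSR1
   picked arbitrarily (<signal.h> and <assert.h> assumed):

     static void
     stopcode_round_trip (void)
     {
       int wstat = W_STOPCODE (SIGUSR1);  /* (sig << 8) | 0x7f.  */

       assert (WIFSTOPPED (wstat));
       assert (WSTOPSIG (wstat) == SIGUSR1);
     }  */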
2180
2181 bool
2182 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2183 {
2184 struct thread_info *saved_thread = current_thread;
2185 current_thread = get_lwp_thread (child);
2186
2187 if (low_stopped_by_watchpoint ())
2188 {
2189 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2190 child->stopped_data_address = low_stopped_data_address ();
2191 }
2192
2193 current_thread = saved_thread;
2194
2195 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2196 }
2197
2198 bool
2199 linux_process_target::low_stopped_by_watchpoint ()
2200 {
2201 return false;
2202 }
2203
2204 CORE_ADDR
2205 linux_process_target::low_stopped_data_address ()
2206 {
2207 return 0;
2208 }
2209
2210 /* Return the ptrace options that we want to try to enable. */
2211
2212 static int
2213 linux_low_ptrace_options (int attached)
2214 {
2215 client_state &cs = get_client_state ();
2216 int options = 0;
2217
2218 if (!attached)
2219 options |= PTRACE_O_EXITKILL;
2220
2221 if (cs.report_fork_events)
2222 options |= PTRACE_O_TRACEFORK;
2223
2224 if (cs.report_vfork_events)
2225 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2226
2227 if (cs.report_exec_events)
2228 options |= PTRACE_O_TRACEEXEC;
2229
2230 options |= PTRACE_O_TRACESYSGOOD;
2231
2232 return options;
2233 }
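
/* The bits computed above reach the kernel through a single
   PTRACE_SETOPTIONS request (linux_enable_event_reporting, called
   from filter_event below, does this for real).  A bare-bones
   stand-alone sketch of that step, assuming PID is an already
   attached, currently stopped tracee:

     #include <sys/ptrace.h>

     static long
     set_trace_options (pid_t pid, int options)
     {
       /* ADDR is unused for this request; OPTIONS goes in DATA.  */
       return ptrace (PTRACE_SETOPTIONS, pid,
                      (void *) 0, (void *) (long) options);
     }  */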
2234
2235 void
2236 linux_process_target::filter_event (int lwpid, int wstat)
2237 {
2238 client_state &cs = get_client_state ();
2239 struct lwp_info *child;
2240 struct thread_info *thread;
2241 int have_stop_pc = 0;
2242
2243 child = find_lwp_pid (ptid_t (lwpid));
2244
2245 /* Check for stop events reported by a process we didn't already
2246 know about - anything not already in our LWP list.
2247
2248 If we're expecting to receive stopped processes after
2249 fork, vfork, and clone events, then we'll just add the
2250 new one to our list and go back to waiting for the event
2251 to be reported - the stopped process might be returned
2252 from waitpid before or after the event is.
2253
2254 But note the case of a non-leader thread exec'ing after the
2255 leader having exited, and gone from our lists (because
2256 check_zombie_leaders deleted it). The non-leader thread
2257 changes its tid to the tgid. */
2258
2259 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2260 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2261 {
2262 ptid_t child_ptid;
2263
2264 /* A multi-thread exec after we had seen the leader exiting. */
2265 if (debug_threads)
2266 {
2267 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2268 "after exec.\n", lwpid);
2269 }
2270
2271 child_ptid = ptid_t (lwpid, lwpid, 0);
2272 child = add_lwp (child_ptid);
2273 child->stopped = 1;
2274 current_thread = child->thread;
2275 }
2276
2277 /* If we didn't find a process, one of two things presumably happened:
2278 - A process we started and then detached from has exited. Ignore it.
2279 - A process we are controlling has forked and the new child's stop
2280 was reported to us by the kernel. Save its PID. */
2281 if (child == NULL && WIFSTOPPED (wstat))
2282 {
2283 add_to_pid_list (&stopped_pids, lwpid, wstat);
2284 return;
2285 }
2286 else if (child == NULL)
2287 return;
2288
2289 thread = get_lwp_thread (child);
2290
2291 child->stopped = 1;
2292
2293 child->last_status = wstat;
2294
2295 /* Check if the thread has exited. */
2296 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2297 {
2298 if (debug_threads)
2299 debug_printf ("LLFE: %d exited.\n", lwpid);
2300
2301 if (finish_step_over (child))
2302 {
2303 /* Unsuspend all other LWPs, and set them back running again. */
2304 unsuspend_all_lwps (child);
2305 }
2306
2307 /* If there is at least one more LWP, then the exit signal was
2308 not the end of the debugged application and should be
2309 ignored, unless GDB wants to hear about thread exits. */
2310 if (cs.report_thread_events
2311 || last_thread_of_process_p (pid_of (thread)))
2312 {
2313 /* Events are serialized to the GDB core, so we can't
2314 report this one right now. Leave the status pending for
2315 the next time we're able to report it. */
2316 mark_lwp_dead (child, wstat);
2317 return;
2318 }
2319 else
2320 {
2321 delete_lwp (child);
2322 return;
2323 }
2324 }
2325
2326 gdb_assert (WIFSTOPPED (wstat));
2327
2328 if (WIFSTOPPED (wstat))
2329 {
2330 struct process_info *proc;
2331
2332 /* Architecture-specific setup after inferior is running. */
2333 proc = find_process_pid (pid_of (thread));
2334 if (proc->tdesc == NULL)
2335 {
2336 if (proc->attached)
2337 {
2338 /* This needs to happen after we have attached to the
2339 inferior and it is stopped for the first time, but
2340 before we access any inferior registers. */
2341 arch_setup_thread (thread);
2342 }
2343 else
2344 {
2345 /* The process is started, but GDBserver will do
2346 architecture-specific setup after the program stops at
2347 the first instruction. */
2348 child->status_pending_p = 1;
2349 child->status_pending = wstat;
2350 return;
2351 }
2352 }
2353 }
2354
2355 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2356 {
2357 struct process_info *proc = find_process_pid (pid_of (thread));
2358 int options = linux_low_ptrace_options (proc->attached);
2359
2360 linux_enable_event_reporting (lwpid, options);
2361 child->must_set_ptrace_flags = 0;
2362 }
2363
2364 /* Always update syscall_state, even if it will be filtered later. */
2365 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2366 {
2367 child->syscall_state
2368 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2369 ? TARGET_WAITKIND_SYSCALL_RETURN
2370 : TARGET_WAITKIND_SYSCALL_ENTRY);
2371 }
2372 else
2373 {
2374 /* Almost all other ptrace-stops are known to be outside of system
2375 calls, with further exceptions in handle_extended_wait. */
2376 child->syscall_state = TARGET_WAITKIND_IGNORE;
2377 }
2378
2379 /* Be careful to not overwrite stop_pc until save_stop_reason is
2380 called. */
2381 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2382 && linux_is_extended_waitstatus (wstat))
2383 {
2384 child->stop_pc = get_pc (child);
2385 if (handle_extended_wait (&child, wstat))
2386 {
2387 /* The event has been handled, so just return without
2388 reporting it. */
2389 return;
2390 }
2391 }
2392
2393 if (linux_wstatus_maybe_breakpoint (wstat))
2394 {
2395 if (save_stop_reason (child))
2396 have_stop_pc = 1;
2397 }
2398
2399 if (!have_stop_pc)
2400 child->stop_pc = get_pc (child);
2401
2402 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2403 && child->stop_expected)
2404 {
2405 if (debug_threads)
2406 debug_printf ("Expected stop.\n");
2407 child->stop_expected = 0;
2408
2409 if (thread->last_resume_kind == resume_stop)
2410 {
2411 /* We want to report the stop to the core. Treat the
2412 SIGSTOP as a normal event. */
2413 if (debug_threads)
2414 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2415 target_pid_to_str (ptid_of (thread)));
2416 }
2417 else if (stopping_threads != NOT_STOPPING_THREADS)
2418 {
2419 /* Stopping threads. We don't want this SIGSTOP to end up
2420 pending. */
2421 if (debug_threads)
2422 debug_printf ("LLW: SIGSTOP caught for %s "
2423 "while stopping threads.\n",
2424 target_pid_to_str (ptid_of (thread)));
2425 return;
2426 }
2427 else
2428 {
2429 /* This is a delayed SIGSTOP. Filter out the event. */
2430 if (debug_threads)
2431 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2432 child->stepping ? "step" : "continue",
2433 target_pid_to_str (ptid_of (thread)));
2434
2435 resume_one_lwp (child, child->stepping, 0, NULL);
2436 return;
2437 }
2438 }
2439
2440 child->status_pending_p = 1;
2441 child->status_pending = wstat;
2442 return;
2443 }
2444
2445 bool
2446 linux_process_target::maybe_hw_step (thread_info *thread)
2447 {
2448 if (supports_hardware_single_step ())
2449 return true;
2450 else
2451 {
2452 /* GDBserver must insert a single-step breakpoint for software
2453 single step. */
2454 gdb_assert (has_single_step_breakpoints (thread));
2455 return false;
2456 }
2457 }
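
/* "Hardware" single-step means the kernel steps the tracee for us via
   a PTRACE_SINGLESTEP resume request.  A minimal sketch of that
   request, assuming PID is a stopped tracee and no signal is to be
   delivered on resume:

     #include <sys/ptrace.h>

     static long
     hw_single_step (pid_t pid)
     {
       /* Resume PID for exactly one instruction, then trap.  */
       return ptrace (PTRACE_SINGLESTEP, pid, (void *) 0, (void *) 0);
     }

   On targets without such a request, gdbserver falls back to the
   single-step breakpoints asserted above.  */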
2458
2459 void
2460 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2461 {
2462 struct lwp_info *lp = get_thread_lwp (thread);
2463
2464 if (lp->stopped
2465 && !lp->suspended
2466 && !lp->status_pending_p
2467 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2468 {
2469 int step = 0;
2470
2471 if (thread->last_resume_kind == resume_step)
2472 step = maybe_hw_step (thread);
2473
2474 if (debug_threads)
2475 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2476 target_pid_to_str (ptid_of (thread)),
2477 paddress (lp->stop_pc),
2478 step);
2479
2480 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2481 }
2482 }
2483
2484 int
2485 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2486 ptid_t filter_ptid,
2487 int *wstatp, int options)
2488 {
2489 struct thread_info *event_thread;
2490 struct lwp_info *event_child, *requested_child;
2491 sigset_t block_mask, prev_mask;
2492
2493 retry:
2494 /* N.B. event_thread points to the thread_info struct that contains
2495 event_child. Keep them in sync. */
2496 event_thread = NULL;
2497 event_child = NULL;
2498 requested_child = NULL;
2499
2500 /* Check for a lwp with a pending status. */
2501
2502 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2503 {
2504 event_thread = find_thread_in_random ([&] (thread_info *thread)
2505 {
2506 return status_pending_p_callback (thread, filter_ptid);
2507 });
2508
2509 if (event_thread != NULL)
2510 event_child = get_thread_lwp (event_thread);
2511 if (debug_threads && event_thread)
2512 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2513 }
2514 else if (filter_ptid != null_ptid)
2515 {
2516 requested_child = find_lwp_pid (filter_ptid);
2517
2518 if (stopping_threads == NOT_STOPPING_THREADS
2519 && requested_child->status_pending_p
2520 && (requested_child->collecting_fast_tracepoint
2521 != fast_tpoint_collect_result::not_collecting))
2522 {
2523 enqueue_one_deferred_signal (requested_child,
2524 &requested_child->status_pending);
2525 requested_child->status_pending_p = 0;
2526 requested_child->status_pending = 0;
2527 resume_one_lwp (requested_child, 0, 0, NULL);
2528 }
2529
2530 if (requested_child->suspended
2531 && requested_child->status_pending_p)
2532 {
2533 internal_error (__FILE__, __LINE__,
2534 "requesting an event out of a"
2535 " suspended child?");
2536 }
2537
2538 if (requested_child->status_pending_p)
2539 {
2540 event_child = requested_child;
2541 event_thread = get_lwp_thread (event_child);
2542 }
2543 }
2544
2545 if (event_child != NULL)
2546 {
2547 if (debug_threads)
2548 debug_printf ("Got an event from pending child %ld (%04x)\n",
2549 lwpid_of (event_thread), event_child->status_pending);
2550 *wstatp = event_child->status_pending;
2551 event_child->status_pending_p = 0;
2552 event_child->status_pending = 0;
2553 current_thread = event_thread;
2554 return lwpid_of (event_thread);
2555 }
2556
2557 /* But if we don't find a pending event, we'll have to wait.
2558
2559 We only enter this loop if no process has a pending wait status.
2560 Thus any action taken in response to a wait status inside this
2561 loop is responding as soon as we detect the status, not after any
2562 pending events. */
2563
2564 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2565 all signals while here. */
2566 sigfillset (&block_mask);
2567 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2568
2569 /* Always pull all events out of the kernel. We'll randomly select
2570 an event LWP out of all that have events, to prevent
2571 starvation. */
2572 while (event_child == NULL)
2573 {
2574 pid_t ret = 0;
2575
2576 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2577 quirks:
2578
2579 - If the thread group leader exits while other threads in the
2580 thread group still exist, waitpid(TGID, ...) hangs. That
2581 waitpid won't return an exit status until the other threads
2582 in the group are reaped.
2583
2584 - When a non-leader thread execs, that thread just vanishes
2585 without reporting an exit (so we'd hang if we waited for it
2586 explicitly in that case). The exec event is reported to
2587 the TGID pid. */
2588 errno = 0;
2589 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2590
2591 if (debug_threads)
2592 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2593 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2594
2595 if (ret > 0)
2596 {
2597 if (debug_threads)
2598 {
2599 debug_printf ("LLW: waitpid %ld received %s\n",
2600 (long) ret, status_to_str (*wstatp).c_str ());
2601 }
2602
2603 /* Filter all events. IOW, leave all events pending. We'll
2604 randomly select an event LWP out of all that have events
2605 below. */
2606 filter_event (ret, *wstatp);
2607 /* Retry until nothing comes out of waitpid. A single
2608 SIGCHLD can indicate more than one child stopped. */
2609 continue;
2610 }
2611
2612 /* Now that we've pulled all events out of the kernel, resume
2613 LWPs that don't have an interesting event to report. */
2614 if (stopping_threads == NOT_STOPPING_THREADS)
2615 for_each_thread ([this] (thread_info *thread)
2616 {
2617 resume_stopped_resumed_lwps (thread);
2618 });
2619
2620 /* ... and find an LWP with a status to report to the core, if
2621 any. */
2622 event_thread = find_thread_in_random ([&] (thread_info *thread)
2623 {
2624 return status_pending_p_callback (thread, filter_ptid);
2625 });
2626
2627 if (event_thread != NULL)
2628 {
2629 event_child = get_thread_lwp (event_thread);
2630 *wstatp = event_child->status_pending;
2631 event_child->status_pending_p = 0;
2632 event_child->status_pending = 0;
2633 break;
2634 }
2635
2636 /* Check for zombie thread group leaders. Those can't be reaped
2637 until all other threads in the thread group are. */
2638 check_zombie_leaders ();
2639
2640 auto not_stopped = [&] (thread_info *thread)
2641 {
2642 return not_stopped_callback (thread, wait_ptid);
2643 };
2644
2645 /* If there are no resumed children left in the set of LWPs we
2646 want to wait for, bail. We can't just block in
2647 waitpid/sigsuspend, because lwps might have been left stopped
2648 in trace-stop state, and we'd be stuck forever waiting for
2649 their status to change (which would only happen if we resumed
2650 them). Even if WNOHANG is set, this return code is preferred
2651 over 0 (below), as it is more detailed. */
2652 if (find_thread (not_stopped) == NULL)
2653 {
2654 if (debug_threads)
2655 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2656 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2657 return -1;
2658 }
2659
2660 /* No interesting event to report to the caller. */
2661 if ((options & WNOHANG))
2662 {
2663 if (debug_threads)
2664 debug_printf ("WNOHANG set, no event found\n");
2665
2666 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2667 return 0;
2668 }
2669
2670 /* Block until we get an event reported with SIGCHLD. */
2671 if (debug_threads)
2672 debug_printf ("sigsuspend'ing\n");
2673
2674 sigsuspend (&prev_mask);
2675 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2676 goto retry;
2677 }
2678
2679 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2680
2681 current_thread = event_thread;
2682
2683 return lwpid_of (event_thread);
2684 }
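
/* A condensed sketch of the "drain with WNOHANG, then block" pattern
   the function above implements, with the LWP bookkeeping and most of
   the signal-mask handling stripped out (illustrative only, not a
   safe replacement):

     #include <signal.h>
     #include <sys/wait.h>

     static void
     drain_then_block (const sigset_t *prev_mask)
     {
       int wstat;
       pid_t pid;

       for (;;)
         {
           /* Pull every pending event out of the kernel.  */
           while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
             ;  /* filter_event (pid, wstat) would go here.  */

           /* Nothing left pending: sleep until the next SIGCHLD.  */
           sigsuspend (prev_mask);
         }
     }  */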
2685
2686 int
2687 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2688 {
2689 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2690 }
2691
2692 /* Select one LWP out of those that have events pending. */
2693
2694 static void
2695 select_event_lwp (struct lwp_info **orig_lp)
2696 {
2697 struct thread_info *event_thread = NULL;
2698
2699 /* In all-stop, give preference to the LWP that is being
2700 single-stepped. There will be at most one, and it's the LWP that
2701 the core is most interested in. If we didn't do this, then we'd
2702 have to handle pending step SIGTRAPs somehow in case the core
2703 later continues the previously-stepped thread, otherwise we'd
2704 report the pending SIGTRAP, and the core, not having stepped the
2705 thread, wouldn't understand what the trap was for, and therefore
2706 would report it to the user as a random signal. */
2707 if (!non_stop)
2708 {
2709 event_thread = find_thread ([] (thread_info *thread)
2710 {
2711 lwp_info *lp = get_thread_lwp (thread);
2712
2713 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2714 && thread->last_resume_kind == resume_step
2715 && lp->status_pending_p);
2716 });
2717
2718 if (event_thread != NULL)
2719 {
2720 if (debug_threads)
2721 debug_printf ("SEL: Select single-step %s\n",
2722 target_pid_to_str (ptid_of (event_thread)));
2723 }
2724 }
2725 if (event_thread == NULL)
2726 {
2727 /* No single-stepping LWP. Select one at random, out of those
2728 which have had events. */
2729
2730 event_thread = find_thread_in_random ([&] (thread_info *thread)
2731 {
2732 lwp_info *lp = get_thread_lwp (thread);
2733
2734 /* Only resumed LWPs that have an event pending. */
2735 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2736 && lp->status_pending_p);
2737 });
2738 }
2739
2740 if (event_thread != NULL)
2741 {
2742 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2743
2744 /* Switch the event LWP. */
2745 *orig_lp = event_lp;
2746 }
2747 }
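
/* find_thread_in_random picks uniformly among all matching threads,
   which is what prevents event starvation here.  The underlying idea
   is one-pass reservoir sampling; a generic sketch over a plain array
   (hypothetical helper, not gdbserver code; <stdlib.h> assumed):

     static int
     pick_uniform (const int *candidates, int n)
     {
       int chosen = -1, seen = 0;

       for (int i = 0; i < n; i++)
         {
           /* Replace the current pick with probability 1/seen; every
              candidate ends up equally likely overall.  */
           seen++;
           if (rand () % seen == 0)
             chosen = candidates[i];
         }
       return chosen;
     }  */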
2748
2749 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2750 non-NULL. */
2751
2752 static void
2753 unsuspend_all_lwps (struct lwp_info *except)
2754 {
2755 for_each_thread ([&] (thread_info *thread)
2756 {
2757 lwp_info *lwp = get_thread_lwp (thread);
2758
2759 if (lwp != except)
2760 lwp_suspended_decr (lwp);
2761 });
2762 }
2763
2764 static bool lwp_running (thread_info *thread);
2765
2766 /* Stabilize threads (move out of jump pads).
2767
2768 If a thread is midway collecting a fast tracepoint, we need to
2769 finish the collection and move it out of the jump pad before
2770 reporting the signal.
2771
2772 This avoids recursion while collecting (when a signal arrives
2773 midway, and the signal handler itself collects), which would trash
2774 the trace buffer. In case the user set a breakpoint in a signal
2775 handler, this avoids the backtrace showing the jump pad, etc..
2776 Most importantly, there are certain things we can't do safely if
2777 threads are stopped in a jump pad (or in its callee's). For
2778 example:
2779
2780 - starting a new trace run. A thread still collecting the
2781 previous run could trash the trace buffer when resumed. The trace
2782 buffer control structures would have been reset but the thread had
2783 no way to tell. The thread could even be midway through memcpy'ing
2784 to the buffer, which would mean that when resumed, it would clobber
2785 the trace buffer that had been set up for the new run.
2786
2787 - we can't rewrite/reuse the jump pads for new tracepoints
2788 safely. Say you do tstart while a thread is stopped midway through
2789 collecting. When the thread is later resumed, it finishes the
2790 collection, and returns to the jump pad, to execute the original
2791 instruction that was under the tracepoint jump at the time the
2792 older run had been started. If the jump pad had since been
2793 rewritten for something else in the new run, the thread would now
2794 execute wrong / random instructions. */
2795
2796 void
2797 linux_process_target::stabilize_threads ()
2798 {
2799 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2800 {
2801 return stuck_in_jump_pad (thread);
2802 });
2803
2804 if (thread_stuck != NULL)
2805 {
2806 if (debug_threads)
2807 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2808 lwpid_of (thread_stuck));
2809 return;
2810 }
2811
2812 thread_info *saved_thread = current_thread;
2813
2814 stabilizing_threads = 1;
2815
2816 /* Kick 'em all. */
2817 for_each_thread ([this] (thread_info *thread)
2818 {
2819 move_out_of_jump_pad (thread);
2820 });
2821
2822 /* Loop until all are stopped out of the jump pads. */
2823 while (find_thread (lwp_running) != NULL)
2824 {
2825 struct target_waitstatus ourstatus;
2826 struct lwp_info *lwp;
2827 int wstat;
2828
2829 /* Note that we go through the full wait event loop. While
2830 moving threads out of jump pad, we need to be able to step
2831 over internal breakpoints and such. */
2832 wait_1 (minus_one_ptid, &ourstatus, 0);
2833
2834 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2835 {
2836 lwp = get_thread_lwp (current_thread);
2837
2838 /* Lock it. */
2839 lwp_suspended_inc (lwp);
2840
2841 if (ourstatus.value.sig != GDB_SIGNAL_0
2842 || current_thread->last_resume_kind == resume_stop)
2843 {
2844 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2845 enqueue_one_deferred_signal (lwp, &wstat);
2846 }
2847 }
2848 }
2849
2850 unsuspend_all_lwps (NULL);
2851
2852 stabilizing_threads = 0;
2853
2854 current_thread = saved_thread;
2855
2856 if (debug_threads)
2857 {
2858 thread_stuck = find_thread ([this] (thread_info *thread)
2859 {
2860 return stuck_in_jump_pad (thread);
2861 });
2862
2863 if (thread_stuck != NULL)
2864 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2865 lwpid_of (thread_stuck));
2866 }
2867 }
2868
2869 /* Convenience function that is called when the kernel reports an
2870 event that is not passed out to GDB. */
2871
2872 static ptid_t
2873 ignore_event (struct target_waitstatus *ourstatus)
2874 {
2875 /* If we got an event, there may still be others, as a single
2876 SIGCHLD can indicate more than one child stopped. This forces
2877 another target_wait call. */
2878 async_file_mark ();
2879
2880 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2881 return null_ptid;
2882 }
2883
2884 ptid_t
2885 linux_process_target::filter_exit_event (lwp_info *event_child,
2886 target_waitstatus *ourstatus)
2887 {
2888 client_state &cs = get_client_state ();
2889 struct thread_info *thread = get_lwp_thread (event_child);
2890 ptid_t ptid = ptid_of (thread);
2891
2892 if (!last_thread_of_process_p (pid_of (thread)))
2893 {
2894 if (cs.report_thread_events)
2895 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2896 else
2897 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2898
2899 delete_lwp (event_child);
2900 }
2901 return ptid;
2902 }
2903
2904 /* Returns 1 if GDB is interested in any event_child syscalls. */
2905
2906 static int
2907 gdb_catching_syscalls_p (struct lwp_info *event_child)
2908 {
2909 struct thread_info *thread = get_lwp_thread (event_child);
2910 struct process_info *proc = get_thread_process (thread);
2911
2912 return !proc->syscalls_to_catch.empty ();
2913 }
2914
2915 bool
2916 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2917 {
2918 int sysno;
2919 struct thread_info *thread = get_lwp_thread (event_child);
2920 struct process_info *proc = get_thread_process (thread);
2921
2922 if (proc->syscalls_to_catch.empty ())
2923 return false;
2924
2925 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2926 return true;
2927
2928 get_syscall_trapinfo (event_child, &sysno);
2929
2930 for (int iter : proc->syscalls_to_catch)
2931 if (iter == sysno)
2932 return true;
2933
2934 return false;
2935 }
2936
2937 ptid_t
2938 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2939 target_wait_flags target_options)
2940 {
2941 client_state &cs = get_client_state ();
2942 int w;
2943 struct lwp_info *event_child;
2944 int options;
2945 int pid;
2946 int step_over_finished;
2947 int bp_explains_trap;
2948 int maybe_internal_trap;
2949 int report_to_gdb;
2950 int trace_event;
2951 int in_step_range;
2952 int any_resumed;
2953
2954 if (debug_threads)
2955 {
2956 debug_enter ();
2957 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
2958 }
2959
2960 /* Translate generic target options into linux options. */
2961 options = __WALL;
2962 if (target_options & TARGET_WNOHANG)
2963 options |= WNOHANG;
2964
2965 bp_explains_trap = 0;
2966 trace_event = 0;
2967 in_step_range = 0;
2968 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2969
2970 auto status_pending_p_any = [&] (thread_info *thread)
2971 {
2972 return status_pending_p_callback (thread, minus_one_ptid);
2973 };
2974
2975 auto not_stopped = [&] (thread_info *thread)
2976 {
2977 return not_stopped_callback (thread, minus_one_ptid);
2978 };
2979
2980 /* Find a resumed LWP, if any. */
2981 if (find_thread (status_pending_p_any) != NULL)
2982 any_resumed = 1;
2983 else if (find_thread (not_stopped) != NULL)
2984 any_resumed = 1;
2985 else
2986 any_resumed = 0;
2987
2988 if (step_over_bkpt == null_ptid)
2989 pid = wait_for_event (ptid, &w, options);
2990 else
2991 {
2992 if (debug_threads)
2993 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2994 target_pid_to_str (step_over_bkpt));
2995 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2996 }
2997
2998 if (pid == 0 || (pid == -1 && !any_resumed))
2999 {
3000 gdb_assert (target_options & TARGET_WNOHANG);
3001
3002 if (debug_threads)
3003 {
3004 debug_printf ("wait_1 ret = null_ptid, "
3005 "TARGET_WAITKIND_IGNORE\n");
3006 debug_exit ();
3007 }
3008
3009 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3010 return null_ptid;
3011 }
3012 else if (pid == -1)
3013 {
3014 if (debug_threads)
3015 {
3016 debug_printf ("wait_1 ret = null_ptid, "
3017 "TARGET_WAITKIND_NO_RESUMED\n");
3018 debug_exit ();
3019 }
3020
3021 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3022 return null_ptid;
3023 }
3024
3025 event_child = get_thread_lwp (current_thread);
3026
3027 /* wait_for_event only returns an exit status for the last
3028 child of a process. Report it. */
3029 if (WIFEXITED (w) || WIFSIGNALED (w))
3030 {
3031 if (WIFEXITED (w))
3032 {
3033 ourstatus->kind = TARGET_WAITKIND_EXITED;
3034 ourstatus->value.integer = WEXITSTATUS (w);
3035
3036 if (debug_threads)
3037 {
3038 debug_printf ("wait_1 ret = %s, exited with "
3039 "retcode %d\n",
3040 target_pid_to_str (ptid_of (current_thread)),
3041 WEXITSTATUS (w));
3042 debug_exit ();
3043 }
3044 }
3045 else
3046 {
3047 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3048 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3049
3050 if (debug_threads)
3051 {
3052 debug_printf ("wait_1 ret = %s, terminated with "
3053 "signal %d\n",
3054 target_pid_to_str (ptid_of (current_thread)),
3055 WTERMSIG (w));
3056 debug_exit ();
3057 }
3058 }
3059
3060 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3061 return filter_exit_event (event_child, ourstatus);
3062
3063 return ptid_of (current_thread);
3064 }
3065
3066 /* If step-over executes a breakpoint instruction, then in the case of
3067 a hardware single step it means a gdb/gdbserver breakpoint had been
3068 planted on top of a permanent breakpoint; in the case of a software
3069 single step it may just mean that gdbserver hit the reinsert breakpoint.
3070 The PC has been adjusted by save_stop_reason to point at
3071 the breakpoint address.
3072 So in the hardware single step case, advance the PC manually past
3073 the breakpoint; in the software single step case, advance it only
3074 if it's not the single_step_breakpoint we are hitting.
3075 This prevents the program from trapping the permanent breakpoint
3076 forever. */
3077 if (step_over_bkpt != null_ptid
3078 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3079 && (event_child->stepping
3080 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3081 {
3082 int increment_pc = 0;
3083 int breakpoint_kind = 0;
3084 CORE_ADDR stop_pc = event_child->stop_pc;
3085
3086 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3087 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3088
3089 if (debug_threads)
3090 {
3091 debug_printf ("step-over for %s executed software breakpoint\n",
3092 target_pid_to_str (ptid_of (current_thread)));
3093 }
3094
3095 if (increment_pc != 0)
3096 {
3097 struct regcache *regcache
3098 = get_thread_regcache (current_thread, 1);
3099
3100 event_child->stop_pc += increment_pc;
3101 low_set_pc (regcache, event_child->stop_pc);
3102
3103 if (!low_breakpoint_at (event_child->stop_pc))
3104 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3105 }
3106 }
3107
3108 /* If this event was not handled before, and is not a SIGTRAP, we
3109 report it. SIGILL and SIGSEGV are also treated as traps in case
3110 a breakpoint is inserted at the current PC. If this target does
3111 not support internal breakpoints at all, we also report the
3112 SIGTRAP without further processing; it's of no concern to us. */
3113 maybe_internal_trap
3114 = (low_supports_breakpoints ()
3115 && (WSTOPSIG (w) == SIGTRAP
3116 || ((WSTOPSIG (w) == SIGILL
3117 || WSTOPSIG (w) == SIGSEGV)
3118 && low_breakpoint_at (event_child->stop_pc))));
3119
3120 if (maybe_internal_trap)
3121 {
3122 /* Handle anything that requires bookkeeping before deciding to
3123 report the event or continue waiting. */
3124
3125 /* First check if we can explain the SIGTRAP with an internal
3126 breakpoint, or if we should possibly report the event to GDB.
3127 Do this before anything that may remove or insert a
3128 breakpoint. */
3129 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3130
3131 /* We have a SIGTRAP, possibly a step-over dance has just
3132 finished. If so, tweak the state machine accordingly,
3133 reinsert breakpoints and delete any single-step
3134 breakpoints. */
3135 step_over_finished = finish_step_over (event_child);
3136
3137 /* Now invoke the callbacks of any internal breakpoints there. */
3138 check_breakpoints (event_child->stop_pc);
3139
3140 /* Handle tracepoint data collecting. This may overflow the
3141 trace buffer, and cause a tracing stop, removing
3142 breakpoints. */
3143 trace_event = handle_tracepoints (event_child);
3144
3145 if (bp_explains_trap)
3146 {
3147 if (debug_threads)
3148 debug_printf ("Hit a gdbserver breakpoint.\n");
3149 }
3150 }
3151 else
3152 {
3153 /* We have some other signal; possibly a step-over dance was in
3154 progress, and it should be cancelled too. */
3155 step_over_finished = finish_step_over (event_child);
3156 }
3157
3158 /* We have all the data we need. Either report the event to GDB, or
3159 resume threads and keep waiting for more. */
3160
3161 /* If we're collecting a fast tracepoint, finish the collection and
3162 move out of the jump pad before delivering a signal. See
3163 linux_stabilize_threads. */
3164
3165 if (WIFSTOPPED (w)
3166 && WSTOPSIG (w) != SIGTRAP
3167 && supports_fast_tracepoints ()
3168 && agent_loaded_p ())
3169 {
3170 if (debug_threads)
3171 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3172 "to defer or adjust it.\n",
3173 WSTOPSIG (w), lwpid_of (current_thread));
3174
3175 /* Allow debugging the jump pad itself. */
3176 if (current_thread->last_resume_kind != resume_step
3177 && maybe_move_out_of_jump_pad (event_child, &w))
3178 {
3179 enqueue_one_deferred_signal (event_child, &w);
3180
3181 if (debug_threads)
3182 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3183 WSTOPSIG (w), lwpid_of (current_thread));
3184
3185 resume_one_lwp (event_child, 0, 0, NULL);
3186
3187 if (debug_threads)
3188 debug_exit ();
3189 return ignore_event (ourstatus);
3190 }
3191 }
3192
3193 if (event_child->collecting_fast_tracepoint
3194 != fast_tpoint_collect_result::not_collecting)
3195 {
3196 if (debug_threads)
3197 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3198 "Check if we're already there.\n",
3199 lwpid_of (current_thread),
3200 (int) event_child->collecting_fast_tracepoint);
3201
3202 trace_event = 1;
3203
3204 event_child->collecting_fast_tracepoint
3205 = linux_fast_tracepoint_collecting (event_child, NULL);
3206
3207 if (event_child->collecting_fast_tracepoint
3208 != fast_tpoint_collect_result::before_insn)
3209 {
3210 /* No longer need this breakpoint. */
3211 if (event_child->exit_jump_pad_bkpt != NULL)
3212 {
3213 if (debug_threads)
3214 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3215 "stopping all threads momentarily.\n");
3216
3217 /* Other running threads could hit this breakpoint.
3218 We don't handle moribund locations like GDB does;
3219 instead we always pause all threads when removing
3220 breakpoints, so that any step-over or
3221 decr_pc_after_break adjustment is always taken
3222 care of while the breakpoint is still
3223 inserted. */
3224 stop_all_lwps (1, event_child);
3225
3226 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3227 event_child->exit_jump_pad_bkpt = NULL;
3228
3229 unstop_all_lwps (1, event_child);
3230
3231 gdb_assert (event_child->suspended >= 0);
3232 }
3233 }
3234
3235 if (event_child->collecting_fast_tracepoint
3236 == fast_tpoint_collect_result::not_collecting)
3237 {
3238 if (debug_threads)
3239 debug_printf ("fast tracepoint finished "
3240 "collecting successfully.\n");
3241
3242 /* We may have a deferred signal to report. */
3243 if (dequeue_one_deferred_signal (event_child, &w))
3244 {
3245 if (debug_threads)
3246 debug_printf ("dequeued one signal.\n");
3247 }
3248 else
3249 {
3250 if (debug_threads)
3251 debug_printf ("no deferred signals.\n");
3252
3253 if (stabilizing_threads)
3254 {
3255 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3256 ourstatus->value.sig = GDB_SIGNAL_0;
3257
3258 if (debug_threads)
3259 {
3260 debug_printf ("wait_1 ret = %s, stopped "
3261 "while stabilizing threads\n",
3262 target_pid_to_str (ptid_of (current_thread)));
3263 debug_exit ();
3264 }
3265
3266 return ptid_of (current_thread);
3267 }
3268 }
3269 }
3270 }
3271
3272 /* Check whether GDB would be interested in this event. */
3273
3274 /* Check if GDB is interested in this syscall. */
3275 if (WIFSTOPPED (w)
3276 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3277 && !gdb_catch_this_syscall (event_child))
3278 {
3279 if (debug_threads)
3280 {
3281 debug_printf ("Ignored syscall for LWP %ld.\n",
3282 lwpid_of (current_thread));
3283 }
3284
3285 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3286
3287 if (debug_threads)
3288 debug_exit ();
3289 return ignore_event (ourstatus);
3290 }
3291
3292 /* If GDB is not interested in this signal, don't stop other
3293 threads, and don't report it to GDB. Just resume the inferior
3294 right away. We do this for threading-related signals as well as
3295 any that GDB specifically requested we ignore. But never ignore
3296 SIGSTOP if we sent it ourselves, and do not ignore signals when
3297 stepping - they may require special handling to skip the signal
3298 handler. Also never ignore signals that could be caused by a
3299 breakpoint. */
3300 if (WIFSTOPPED (w)
3301 && current_thread->last_resume_kind != resume_step
3302 && (
3303 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3304 (current_process ()->priv->thread_db != NULL
3305 && (WSTOPSIG (w) == __SIGRTMIN
3306 || WSTOPSIG (w) == __SIGRTMIN + 1))
3307 ||
3308 #endif
3309 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3310 && !(WSTOPSIG (w) == SIGSTOP
3311 && current_thread->last_resume_kind == resume_stop)
3312 && !linux_wstatus_maybe_breakpoint (w))))
3313 {
3314 siginfo_t info, *info_p;
3315
3316 if (debug_threads)
3317 debug_printf ("Ignored signal %d for LWP %ld.\n",
3318 WSTOPSIG (w), lwpid_of (current_thread));
3319
3320 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3321 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3322 info_p = &info;
3323 else
3324 info_p = NULL;
3325
3326 if (step_over_finished)
3327 {
3328 /* We cancelled this thread's step-over above. We still
3329 need to unsuspend all other LWPs, and set them back
3330 running again while the signal handler runs. */
3331 unsuspend_all_lwps (event_child);
3332
3333 /* Enqueue the pending signal info so that proceed_all_lwps
3334 doesn't lose it. */
3335 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3336
3337 proceed_all_lwps ();
3338 }
3339 else
3340 {
3341 resume_one_lwp (event_child, event_child->stepping,
3342 WSTOPSIG (w), info_p);
3343 }
3344
3345 if (debug_threads)
3346 debug_exit ();
3347
3348 return ignore_event (ourstatus);
3349 }
3350
3351 /* Note that all addresses are always "out of the step range" when
3352 there's no range to begin with. */
3353 in_step_range = lwp_in_step_range (event_child);
3354
3355 /* If GDB wanted this thread to single step, and the thread is out
3356 of the step range, we always want to report the SIGTRAP, and let
3357 GDB handle it. Watchpoints should always be reported. So should
3358 signals we can't explain. A SIGTRAP we can't explain could be a
3359 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3360 do, we'd be able to handle GDB breakpoints on top of internal
3361 breakpoints, by handling the internal breakpoint and still
3362 reporting the event to GDB. If we don't, we're out of luck, GDB
3363 won't see the breakpoint hit. If we see a single-step event but
3364 the thread should be continuing, don't pass the trap to GDB.
3365 That indicates that we had previously finished a single-step but
3366 left the single-step pending -- see
3367 complete_ongoing_step_over. */
3368 report_to_gdb = (!maybe_internal_trap
3369 || (current_thread->last_resume_kind == resume_step
3370 && !in_step_range)
3371 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3372 || (!in_step_range
3373 && !bp_explains_trap
3374 && !trace_event
3375 && !step_over_finished
3376 && !(current_thread->last_resume_kind == resume_continue
3377 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3378 || (gdb_breakpoint_here (event_child->stop_pc)
3379 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3380 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3381 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3382
3383 run_breakpoint_commands (event_child->stop_pc);
3384
3385 /* We found no reason GDB would want us to stop. We either hit one
3386 of our own breakpoints, or finished an internal step GDB
3387 shouldn't know about. */
3388 if (!report_to_gdb)
3389 {
3390 if (debug_threads)
3391 {
3392 if (bp_explains_trap)
3393 debug_printf ("Hit a gdbserver breakpoint.\n");
3394 if (step_over_finished)
3395 debug_printf ("Step-over finished.\n");
3396 if (trace_event)
3397 debug_printf ("Tracepoint event.\n");
3398 if (lwp_in_step_range (event_child))
3399 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3400 paddress (event_child->stop_pc),
3401 paddress (event_child->step_range_start),
3402 paddress (event_child->step_range_end));
3403 }
3404
3405 /* We're not reporting this breakpoint to GDB, so apply the
3406 decr_pc_after_break adjustment to the inferior's regcache
3407 ourselves. */
3408
3409 if (low_supports_breakpoints ())
3410 {
3411 struct regcache *regcache
3412 = get_thread_regcache (current_thread, 1);
3413 low_set_pc (regcache, event_child->stop_pc);
3414 }
3415
3416 if (step_over_finished)
3417 {
3418 /* If we have finished stepping over a breakpoint, we've
3419 stopped and suspended all LWPs momentarily except the
3420 stepping one. This is where we resume them all again.
3421 We're going to keep waiting, so use proceed, which
3422 handles stepping over the next breakpoint. */
3423 unsuspend_all_lwps (event_child);
3424 }
3425 else
3426 {
3427 /* Remove the single-step breakpoints, if any. Note that
3428 there aren't any single-step breakpoints if we finished
3429 stepping over. */
3430 if (supports_software_single_step ()
3431 && has_single_step_breakpoints (current_thread))
3432 {
3433 stop_all_lwps (0, event_child);
3434 delete_single_step_breakpoints (current_thread);
3435 unstop_all_lwps (0, event_child);
3436 }
3437 }
3438
3439 if (debug_threads)
3440 debug_printf ("proceeding all threads.\n");
3441 proceed_all_lwps ();
3442
3443 if (debug_threads)
3444 debug_exit ();
3445
3446 return ignore_event (ourstatus);
3447 }
3448
3449 if (debug_threads)
3450 {
3451 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3452 {
3453 std::string str
3454 = target_waitstatus_to_string (&event_child->waitstatus);
3455
3456 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3457 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3458 }
3459 if (current_thread->last_resume_kind == resume_step)
3460 {
3461 if (event_child->step_range_start == event_child->step_range_end)
3462 debug_printf ("GDB wanted to single-step, reporting event.\n");
3463 else if (!lwp_in_step_range (event_child))
3464 debug_printf ("Out of step range, reporting event.\n");
3465 }
3466 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3467 debug_printf ("Stopped by watchpoint.\n");
3468 else if (gdb_breakpoint_here (event_child->stop_pc))
3469 debug_printf ("Stopped by GDB breakpoint.\n");
3470 if (debug_threads)
3471 debug_printf ("Hit a non-gdbserver trap event.\n");
3472 }
3473
3474 /* Alright, we're going to report a stop. */
3475
3476 /* Remove single-step breakpoints. */
3477 if (supports_software_single_step ())
3478 {
3479 /* Decide whether to remove single-step breakpoints. If we do,
3480 stop all lwps, so that other threads won't hit a breakpoint
3481 left in stale memory. */
3482 int remove_single_step_breakpoints_p = 0;
3483
3484 if (non_stop)
3485 {
3486 remove_single_step_breakpoints_p
3487 = has_single_step_breakpoints (current_thread);
3488 }
3489 else
3490 {
3491 /* In all-stop, a stop reply cancels all previous resume
3492 requests. Delete all single-step breakpoints. */
3493
3494 find_thread ([&] (thread_info *thread) {
3495 if (has_single_step_breakpoints (thread))
3496 {
3497 remove_single_step_breakpoints_p = 1;
3498 return true;
3499 }
3500
3501 return false;
3502 });
3503 }
3504
3505 if (remove_single_step_breakpoints_p)
3506 {
3507 /* If we remove single-step breakpoints from memory, stop all lwps,
3508 so that other threads won't hit the breakpoint in the stale
3509 memory. */
3510 stop_all_lwps (0, event_child);
3511
3512 if (non_stop)
3513 {
3514 gdb_assert (has_single_step_breakpoints (current_thread));
3515 delete_single_step_breakpoints (current_thread);
3516 }
3517 else
3518 {
3519 for_each_thread ([] (thread_info *thread){
3520 if (has_single_step_breakpoints (thread))
3521 delete_single_step_breakpoints (thread);
3522 });
3523 }
3524
3525 unstop_all_lwps (0, event_child);
3526 }
3527 }
3528
3529 if (!stabilizing_threads)
3530 {
3531 /* In all-stop, stop all threads. */
3532 if (!non_stop)
3533 stop_all_lwps (0, NULL);
3534
3535 if (step_over_finished)
3536 {
3537 if (!non_stop)
3538 {
3539 /* If we were doing a step-over, all other threads but
3540 the stepping one had been paused in start_step_over,
3541 with their suspend counts incremented. We don't want
3542 to do a full unstop/unpause, because we're in
3543 all-stop mode (so we want threads stopped), but we
3544 still need to unsuspend the other threads, to
3545 decrement their `suspended' count back. */
3546 unsuspend_all_lwps (event_child);
3547 }
3548 else
3549 {
3550 /* If we just finished a step-over, then all threads had
3551 been momentarily paused. In all-stop, that's fine,
3552 we want threads stopped by now anyway. In non-stop,
3553 we need to re-resume threads that GDB wanted to be
3554 running. */
3555 unstop_all_lwps (1, event_child);
3556 }
3557 }
3558
3559 /* If we're not waiting for a specific LWP, choose an event LWP
3560 from among those that have had events. Giving equal priority
3561 to all LWPs that have had events helps prevent
3562 starvation. */
3563 if (ptid == minus_one_ptid)
3564 {
3565 event_child->status_pending_p = 1;
3566 event_child->status_pending = w;
3567
3568 select_event_lwp (&event_child);
3569
3570 /* current_thread and event_child must stay in sync. */
3571 current_thread = get_lwp_thread (event_child);
3572
3573 event_child->status_pending_p = 0;
3574 w = event_child->status_pending;
3575 }
3576
3577
3578 /* Stabilize threads (move out of jump pads). */
3579 if (!non_stop)
3580 target_stabilize_threads ();
3581 }
3582 else
3583 {
3584 /* If we just finished a step-over, then all threads had been
3585 momentarily paused. In all-stop, that's fine, we want
3586 threads stopped by now anyway. In non-stop, we need to
3587 re-resume threads that GDB wanted to be running. */
3588 if (step_over_finished)
3589 unstop_all_lwps (1, event_child);
3590 }
3591
3592 /* At this point, we haven't set OURSTATUS. This is where we do it. */
3593 gdb_assert (ourstatus->kind == TARGET_WAITKIND_IGNORE);
3594
3595 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3596 {
3597 /* If the reported event is an exit, fork, vfork or exec, let
3598 GDB know. */
3599
3600 /* Break the unreported fork relationship chain. */
3601 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3602 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3603 {
3604 event_child->fork_relative->fork_relative = NULL;
3605 event_child->fork_relative = NULL;
3606 }
3607
3608 *ourstatus = event_child->waitstatus;
3609 /* Clear the event lwp's waitstatus since we handled it already. */
3610 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3611 }
3612 else
3613 {
3614 /* The LWP stopped due to a plain signal or a syscall signal. Either way,
3615 event_child->waitstatus wasn't filled in with the details, so look at
3616 the wait status W. */
3617 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3618 {
3619 int syscall_number;
3620
3621 get_syscall_trapinfo (event_child, &syscall_number);
3622 ourstatus->kind = event_child->syscall_state;
3623 }
3624 else if (current_thread->last_resume_kind == resume_stop
3625 && WSTOPSIG (w) == SIGSTOP)
3626 {
3627 /* GDB requested this thread to stop with vCont;t, and it
3628 stopped cleanly, so report it as SIG0. The use of
3629 SIGSTOP is an implementation detail. */
3630 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3631 ourstatus->value.sig = GDB_SIGNAL_0;
3632 }
3633 else
3634 {
3635 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3636 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3637 }
3638 }
3639
3640 /* Now that we've selected our final event LWP, un-adjust its PC if
3641 it was a software breakpoint, and the client doesn't know we can
3642 adjust the breakpoint ourselves. */
3643 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3644 && !cs.swbreak_feature)
3645 {
3646 int decr_pc = low_decr_pc_after_break ();
3647
3648 if (decr_pc != 0)
3649 {
3650 struct regcache *regcache
3651 = get_thread_regcache (current_thread, 1);
3652 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3653 }
3654 }
3655
3656 gdb_assert (step_over_bkpt == null_ptid);
3657
3658 if (debug_threads)
3659 {
3660 debug_printf ("wait_1 ret = %s, %d, %d\n",
3661 target_pid_to_str (ptid_of (current_thread)),
3662 ourstatus->kind, ourstatus->value.sig);
3663 debug_exit ();
3664 }
3665
3666 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3667 return filter_exit_event (event_child, ourstatus);
3668
3669 return ptid_of (current_thread);
3670 }
3671
3672 /* Get rid of any pending event in the pipe. */
3673 static void
3674 async_file_flush (void)
3675 {
3676 int ret;
3677 char buf;
3678
3679 do
3680 ret = read (linux_event_pipe[0], &buf, 1);
3681 while (ret >= 0 || (ret == -1 && errno == EINTR));
3682 }
3683
3684 /* Put something in the pipe, so the event loop wakes up. */
3685 static void
3686 async_file_mark (void)
3687 {
3688 int ret;
3689
3690 async_file_flush ();
3691
3692 do
3693 ret = write (linux_event_pipe[1], "+", 1);
3694 while (ret == 0 || (ret == -1 && errno == EINTR));
3695
3696 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3697 be awakened anyway. */
3698 }
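
/* The two helpers above are the classic self-pipe trick: a write to
   linux_event_pipe[1] makes the event loop's select/poll on
   linux_event_pipe[0] return.  For the EAGAIN/flush reasoning above
   to hold, both pipe ends must be non-blocking; a sketch of creating
   such a pipe (gdbserver does the real setup elsewhere, when async
   mode is enabled):

     #include <fcntl.h>
     #include <unistd.h>

     static int
     make_event_pipe (int fds[2])
     {
       if (pipe (fds) != 0)
         return -1;
       fcntl (fds[0], F_SETFL, O_NONBLOCK);
       fcntl (fds[1], F_SETFL, O_NONBLOCK);
       return 0;
     }  */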
3699
3700 ptid_t
3701 linux_process_target::wait (ptid_t ptid,
3702 target_waitstatus *ourstatus,
3703 target_wait_flags target_options)
3704 {
3705 ptid_t event_ptid;
3706
3707 /* Flush the async file first. */
3708 if (target_is_async_p ())
3709 async_file_flush ();
3710
3711 do
3712 {
3713 event_ptid = wait_1 (ptid, ourstatus, target_options);
3714 }
3715 while ((target_options & TARGET_WNOHANG) == 0
3716 && event_ptid == null_ptid
3717 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3718
3719 /* If at least one stop was reported, there may be more. A single
3720 SIGCHLD can signal more than one child stop. */
3721 if (target_is_async_p ()
3722 && (target_options & TARGET_WNOHANG) != 0
3723 && event_ptid != null_ptid)
3724 async_file_mark ();
3725
3726 return event_ptid;
3727 }
3728
3729 /* Send a signal to an LWP. */
3730
3731 static int
3732 kill_lwp (unsigned long lwpid, int signo)
3733 {
3734 int ret;
3735
3736 errno = 0;
3737 ret = syscall (__NR_tkill, lwpid, signo);
3738 if (errno == ENOSYS)
3739 {
3740 /* If tkill fails, then we are not using nptl threads, a
3741 configuration we no longer support. */
3742 perror_with_name (("tkill"));
3743 }
3744 return ret;
3745 }
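
/* For illustration: tkill(2) directs a signal at one specific thread,
   unlike kill(2), which signals the process as a whole.  A hedged
   sketch of the closely related tgkill(2) variant, which additionally
   checks the thread group and so cannot hit a recycled TID that now
   belongs to another process (hypothetical wrapper, not compiled):  */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
tgkill_lwp (pid_t tgid, pid_t tid, int signo)
{
  /* Fails with ESRCH if TID no longer belongs to TGID.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif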
3746
3747 void
3748 linux_stop_lwp (struct lwp_info *lwp)
3749 {
3750 send_sigstop (lwp);
3751 }
3752
3753 static void
3754 send_sigstop (struct lwp_info *lwp)
3755 {
3756 int pid;
3757
3758 pid = lwpid_of (get_lwp_thread (lwp));
3759
3760 /* If we already have a pending stop signal for this process, don't
3761 send another. */
3762 if (lwp->stop_expected)
3763 {
3764 if (debug_threads)
3765 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3766
3767 return;
3768 }
3769
3770 if (debug_threads)
3771 debug_printf ("Sending sigstop to lwp %d\n", pid);
3772
3773 lwp->stop_expected = 1;
3774 kill_lwp (pid, SIGSTOP);
3775 }
3776
3777 static void
3778 send_sigstop (thread_info *thread, lwp_info *except)
3779 {
3780 struct lwp_info *lwp = get_thread_lwp (thread);
3781
3782 /* Ignore EXCEPT. */
3783 if (lwp == except)
3784 return;
3785
3786 if (lwp->stopped)
3787 return;
3788
3789 send_sigstop (lwp);
3790 }
3791
3792 /* Increment the suspend count of an LWP, and stop it, if not stopped
3793 yet. */
3794 static void
3795 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3796 {
3797 struct lwp_info *lwp = get_thread_lwp (thread);
3798
3799 /* Ignore EXCEPT. */
3800 if (lwp == except)
3801 return;
3802
3803 lwp_suspended_inc (lwp);
3804
3805 send_sigstop (thread, except);
3806 }
3807
3808 static void
3809 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3810 {
3811 /* Store the exit status for later. */
3812 lwp->status_pending_p = 1;
3813 lwp->status_pending = wstat;
3814
3815 /* Store in waitstatus as well, as there's nothing else to process
3816 for this event. */
3817 if (WIFEXITED (wstat))
3818 {
3819 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3820 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3821 }
3822 else if (WIFSIGNALED (wstat))
3823 {
3824 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3825 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3826 }
3827
3828 /* Prevent trying to stop it. */
3829 lwp->stopped = 1;
3830
3831 /* No further stops are expected from a dead lwp. */
3832 lwp->stop_expected = 0;
3833 }
3834
3835 /* Return true if LWP has exited already, and has a pending exit event
3836 to report to GDB. */
3837
3838 static int
3839 lwp_is_marked_dead (struct lwp_info *lwp)
3840 {
3841 return (lwp->status_pending_p
3842 && (WIFEXITED (lwp->status_pending)
3843 || WIFSIGNALED (lwp->status_pending)));
3844 }
3845
3846 void
3847 linux_process_target::wait_for_sigstop ()
3848 {
3849 struct thread_info *saved_thread;
3850 ptid_t saved_tid;
3851 int wstat;
3852 int ret;
3853
3854 saved_thread = current_thread;
3855 if (saved_thread != NULL)
3856 saved_tid = saved_thread->id;
3857 else
3858 saved_tid = null_ptid; /* avoid bogus unused warning */
3859
3860 if (debug_threads)
3861 debug_printf ("wait_for_sigstop: pulling events\n");
3862
3863 /* Passing NULL_PTID as filter indicates we want all events to be
3864 left pending. Eventually this returns when there are no
3865 unwaited-for children left. */
3866 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3867 gdb_assert (ret == -1);
3868
3869 if (saved_thread == NULL || mythread_alive (saved_tid))
3870 current_thread = saved_thread;
3871 else
3872 {
3873 if (debug_threads)
3874 debug_printf ("Previously current thread died.\n");
3875
3876 /* We can't change the current inferior behind GDB's back;
3877 otherwise, a subsequent command may apply to the wrong
3878 process. */
3879 current_thread = NULL;
3880 }
3881 }
3882
3883 bool
3884 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3885 {
3886 struct lwp_info *lwp = get_thread_lwp (thread);
3887
3888 if (lwp->suspended != 0)
3889 {
3890 internal_error (__FILE__, __LINE__,
3891 "LWP %ld is suspended, suspended=%d\n",
3892 lwpid_of (thread), lwp->suspended);
3893 }
3894 gdb_assert (lwp->stopped);
3895
3896 /* Allow debugging the jump pad, gdb_collect, etc. */
3897 return (supports_fast_tracepoints ()
3898 && agent_loaded_p ()
3899 && (gdb_breakpoint_here (lwp->stop_pc)
3900 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3901 || thread->last_resume_kind == resume_step)
3902 && (linux_fast_tracepoint_collecting (lwp, NULL)
3903 != fast_tpoint_collect_result::not_collecting));
3904 }
3905
3906 void
3907 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3908 {
3909 struct thread_info *saved_thread;
3910 struct lwp_info *lwp = get_thread_lwp (thread);
3911 int *wstat;
3912
3913 if (lwp->suspended != 0)
3914 {
3915 internal_error (__FILE__, __LINE__,
3916 "LWP %ld is suspended, suspended=%d\n",
3917 lwpid_of (thread), lwp->suspended);
3918 }
3919 gdb_assert (lwp->stopped);
3920
3921 /* For gdb_breakpoint_here. */
3922 saved_thread = current_thread;
3923 current_thread = thread;
3924
3925 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3926
3927 /* Allow debugging the jump pad, gdb_collect, etc. */
3928 if (!gdb_breakpoint_here (lwp->stop_pc)
3929 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3930 && thread->last_resume_kind != resume_step
3931 && maybe_move_out_of_jump_pad (lwp, wstat))
3932 {
3933 if (debug_threads)
3934 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3935 lwpid_of (thread));
3936
3937 if (wstat)
3938 {
3939 lwp->status_pending_p = 0;
3940 enqueue_one_deferred_signal (lwp, wstat);
3941
3942 if (debug_threads)
3943 debug_printf ("Signal %d for LWP %ld deferred "
3944 "(in jump pad)\n",
3945 WSTOPSIG (*wstat), lwpid_of (thread));
3946 }
3947
3948 resume_one_lwp (lwp, 0, 0, NULL);
3949 }
3950 else
3951 lwp_suspended_inc (lwp);
3952
3953 current_thread = saved_thread;
3954 }
3955
3956 static bool
3957 lwp_running (thread_info *thread)
3958 {
3959 struct lwp_info *lwp = get_thread_lwp (thread);
3960
3961 if (lwp_is_marked_dead (lwp))
3962 return false;
3963
3964 return !lwp->stopped;
3965 }
3966
3967 void
3968 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3969 {
3970 /* Should not be called recursively. */
3971 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3972
3973 if (debug_threads)
3974 {
3975 debug_enter ();
3976 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3977 suspend ? "stop-and-suspend" : "stop",
3978 except != NULL
3979 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3980 : "none");
3981 }
3982
3983 stopping_threads = (suspend
3984 ? STOPPING_AND_SUSPENDING_THREADS
3985 : STOPPING_THREADS);
3986
3987 if (suspend)
3988 for_each_thread ([&] (thread_info *thread)
3989 {
3990 suspend_and_send_sigstop (thread, except);
3991 });
3992 else
3993 for_each_thread ([&] (thread_info *thread)
3994 {
3995 send_sigstop (thread, except);
3996 });
3997
3998 wait_for_sigstop ();
3999 stopping_threads = NOT_STOPPING_THREADS;
4000
4001 if (debug_threads)
4002 {
4003 debug_printf ("stop_all_lwps done, setting stopping_threads "
4004 "back to !stopping\n");
4005 debug_exit ();
4006 }
4007 }
4008
4009 /* Enqueue one signal in the chain of signals which need to be
4010 delivered to this process on next resume. */
4011
4012 static void
4013 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4014 {
4015 lwp->pending_signals.emplace_back (signal);
4016 if (info == nullptr)
4017 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
4018 else
4019 lwp->pending_signals.back ().info = *info;
4020 }
4021
4022 void
4023 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4024 {
4025 struct thread_info *thread = get_lwp_thread (lwp);
4026 struct regcache *regcache = get_thread_regcache (thread, 1);
4027
4028 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4029
4030 current_thread = thread;
4031 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4032
4033 for (CORE_ADDR pc : next_pcs)
4034 set_single_step_breakpoint (pc, current_ptid);
4035 }
4036
4037 int
4038 linux_process_target::single_step (lwp_info* lwp)
4039 {
4040 int step = 0;
4041
4042 if (supports_hardware_single_step ())
4043 {
4044 step = 1;
4045 }
4046 else if (supports_software_single_step ())
4047 {
4048 install_software_single_step_breakpoints (lwp);
4049 step = 0;
4050 }
4051 else
4052 {
4053 if (debug_threads)
4054 debug_printf ("stepping is not implemented on this target\n");
4055 }
4056
4057 return step;
4058 }
4059
4060 /* The signal can be delivered to the inferior if we are not trying to
4061 finish a fast tracepoint collect. Since a signal can be delivered
4062 during the step-over, the program may enter the signal handler and
4063 trap again after returning from it. We can live with such spurious
4064 double traps. */
4065
4066 static int
4067 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4068 {
4069 return (lwp->collecting_fast_tracepoint
4070 == fast_tpoint_collect_result::not_collecting);
4071 }
4072
4073 void
4074 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4075 int signal, siginfo_t *info)
4076 {
4077 struct thread_info *thread = get_lwp_thread (lwp);
4078 struct thread_info *saved_thread;
4079 int ptrace_request;
4080 struct process_info *proc = get_thread_process (thread);
4081
4082 /* Note that target description may not be initialised
4083 (proc->tdesc == NULL) at this point because the program hasn't
4084 stopped at the first instruction yet. It means GDBserver skips
4085 the extra traps from the wrapper program (see option --wrapper).
4086 Code in this function that requires register access should be
4087 guarded by a proc->tdesc != NULL check, or equivalent. */
4088
4089 if (lwp->stopped == 0)
4090 return;
4091
4092 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4093
4094 fast_tpoint_collect_result fast_tp_collecting
4095 = lwp->collecting_fast_tracepoint;
4096
4097 gdb_assert (!stabilizing_threads
4098 || (fast_tp_collecting
4099 != fast_tpoint_collect_result::not_collecting));
4100
4101 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4102 user used the "jump" command, or "set $pc = foo"). */
4103 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4104 {
4105 /* Collecting 'while-stepping' actions doesn't make sense
4106 anymore. */
4107 release_while_stepping_state_list (thread);
4108 }
4109
4110 /* If we have pending signals or status, and a new signal, enqueue the
4111 signal. Also enqueue the signal if it can't be delivered to the
4112 inferior right now. */
4113 if (signal != 0
4114 && (lwp->status_pending_p
4115 || !lwp->pending_signals.empty ()
4116 || !lwp_signal_can_be_delivered (lwp)))
4117 {
4118 enqueue_pending_signal (lwp, signal, info);
4119
4120 /* Postpone any pending signal. It was enqueued above. */
4121 signal = 0;
4122 }
4123
4124 if (lwp->status_pending_p)
4125 {
4126 if (debug_threads)
4127 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4128 " has pending status\n",
4129 lwpid_of (thread), step ? "step" : "continue",
4130 lwp->stop_expected ? "expected" : "not expected");
4131 return;
4132 }
4133
4134 saved_thread = current_thread;
4135 current_thread = thread;
4136
4137 /* This bit needs some thinking about. If we get a signal that
4138 we must report while a single-step reinsert is still pending,
4139 we often end up resuming the thread. It might be better to
4140 (ew) allow a stack of pending events; then we could be sure that
4141 the reinsert happened right away and not lose any signals.
4142
4143 Making this stack would also shrink the window in which breakpoints are
4144 uninserted (see comment in linux_wait_for_lwp) but not enough for
4145 complete correctness, so it won't solve that problem. It may be
4146 worthwhile just to solve this one, however. */
4147 if (lwp->bp_reinsert != 0)
4148 {
4149 if (debug_threads)
4150 debug_printf (" pending reinsert at 0x%s\n",
4151 paddress (lwp->bp_reinsert));
4152
4153 if (supports_hardware_single_step ())
4154 {
4155 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4156 {
4157 if (step == 0)
4158 warning ("BAD - reinserting but not stepping.");
4159 if (lwp->suspended)
4160 warning ("BAD - reinserting and suspended(%d).",
4161 lwp->suspended);
4162 }
4163 }
4164
4165 step = maybe_hw_step (thread);
4166 }
4167
4168 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4169 {
4170 if (debug_threads)
4171 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4172 " (exit-jump-pad-bkpt)\n",
4173 lwpid_of (thread));
4174 }
4175 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4176 {
4177 if (debug_threads)
4178 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4179 " single-stepping\n",
4180 lwpid_of (thread));
4181
4182 if (supports_hardware_single_step ())
4183 step = 1;
4184 else
4185 {
4186 internal_error (__FILE__, __LINE__,
4187 "moving out of jump pad single-stepping"
4188 " not implemented on this target");
4189 }
4190 }
4191
4192 /* If we have while-stepping actions in this thread, set it stepping.
4193 If we have a signal to deliver, it may or may not be set to
4194 SIG_IGN, we don't know. Assume so, and allow collecting
4195 while-stepping into a signal handler. A possible smart thing to
4196 do would be to set an internal breakpoint at the signal return
4197 address, continue, and carry on catching this while-stepping
4198 action only when that breakpoint is hit. A future
4199 enhancement. */
4200 if (thread->while_stepping != NULL)
4201 {
4202 if (debug_threads)
4203 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4204 lwpid_of (thread));
4205
4206 step = single_step (lwp);
4207 }
4208
4209 if (proc->tdesc != NULL && low_supports_breakpoints ())
4210 {
4211 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4212
4213 lwp->stop_pc = low_get_pc (regcache);
4214
4215 if (debug_threads)
4216 {
4217 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4218 (long) lwp->stop_pc);
4219 }
4220 }
4221
4222 /* If we have pending signals, consume one if it can be delivered to
4223 the inferior. */
4224 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4225 {
4226 const pending_signal &p_sig = lwp->pending_signals.front ();
4227
4228 signal = p_sig.signal;
4229 if (p_sig.info.si_signo != 0)
4230 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4231 &p_sig.info);
4232
4233 lwp->pending_signals.pop_front ();
4234 }
4235
4236 if (debug_threads)
4237 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4238 lwpid_of (thread), step ? "step" : "continue", signal,
4239 lwp->stop_expected ? "expected" : "not expected");
4240
4241 low_prepare_to_resume (lwp);
4242
4243 regcache_invalidate_thread (thread);
4244 errno = 0;
4245 lwp->stepping = step;
4246 if (step)
4247 ptrace_request = PTRACE_SINGLESTEP;
4248 else if (gdb_catching_syscalls_p (lwp))
4249 ptrace_request = PTRACE_SYSCALL;
4250 else
4251 ptrace_request = PTRACE_CONT;
4252 ptrace (ptrace_request,
4253 lwpid_of (thread),
4254 (PTRACE_TYPE_ARG3) 0,
4255 /* Coerce to a uintptr_t first to avoid potential gcc warning
4256 of coercing an 8 byte integer to a 4 byte pointer. */
4257 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4258
4259 current_thread = saved_thread;
4260 if (errno)
4261 perror_with_name ("resuming thread");
4262
4263 /* Successfully resumed. Clear state that no longer makes sense,
4264 and mark the LWP as running. Must not do this before resuming
4265 otherwise if that fails other code will be confused. E.g., we'd
4266 later try to stop the LWP and hang forever waiting for a stop
4267 status. Note that we must not throw after this is cleared,
4268 otherwise handle_zombie_lwp_error would get confused. */
4269 lwp->stopped = 0;
4270 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4271 }
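
/* For illustration: a condensed sketch of the resume decision made at
   the end of resume_one_lwp_throw above, in terms of the raw ptrace(2)
   API (hypothetical helper, not compiled).  For all three requests the
   ADDR argument is ignored and DATA carries the signal to deliver on
   resume, 0 meaning none.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static long
resume_sketch (pid_t tid, int step, int catching_syscalls, int signo)
{
  enum __ptrace_request req
    = (step ? PTRACE_SINGLESTEP
       : catching_syscalls ? PTRACE_SYSCALL
       : PTRACE_CONT);

  return ptrace (req, tid, (void *) 0, (void *) (long) signo);
}
#endif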
4272
4273 void
4274 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4275 {
4276 /* Nop. */
4277 }
4278
4279 /* Called when we try to resume a stopped LWP and that errors out. If
4280 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4281 or about to become), discard the error, clear any pending status
4282 the LWP may have, and return true (we'll collect the exit status
4283 soon enough). Otherwise, return false. */
4284
4285 static int
4286 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4287 {
4288 struct thread_info *thread = get_lwp_thread (lp);
4289
4290 /* If we get an error after resuming the LWP successfully, we'd
4291 confuse !T state for the LWP being gone. */
4292 gdb_assert (lp->stopped);
4293
4294 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4295 because even if ptrace failed with ESRCH, the tracee may be "not
4296 yet fully dead", but already refusing ptrace requests. In that
4297 case the tracee has 'R (Running)' state for a little bit
4298 (observed in Linux 3.18). See also the note on ESRCH in the
4299 ptrace(2) man page. Instead, check whether the LWP has any state
4300 other than ptrace-stopped. */
4301
4302 /* Don't assume anything if /proc/PID/status can't be read. */
4303 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4304 {
4305 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4306 lp->status_pending_p = 0;
4307 return 1;
4308 }
4309 return 0;
4310 }
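
/* For illustration: a sketch of the /proc check referenced above,
   reading the "State:" line of /proc/PID/status and testing for the
   't' (tracing stop) state.  The helper name is hypothetical; the real
   code uses linux_proc_pid_is_trace_stopped_nowarn.  Not compiled.  */
#if 0
#include <stdio.h>
#include <string.h>

/* Return 1 if TID is in ptrace-stop, 0 if not, -1 if unreadable.  */
static int
is_trace_stopped (int tid)
{
  char path[64], line[128];
  int result = -1;
  FILE *f;

  snprintf (path, sizeof path, "/proc/%d/status", tid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tt (tracing stop)".  */
	result = strstr (line, "t (tracing stop)") != NULL;
	break;
      }
  fclose (f);
  return result;
}
#endif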
4311
4312 void
4313 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4314 siginfo_t *info)
4315 {
4316 try
4317 {
4318 resume_one_lwp_throw (lwp, step, signal, info);
4319 }
4320 catch (const gdb_exception_error &ex)
4321 {
4322 if (!check_ptrace_stopped_lwp_gone (lwp))
4323 throw;
4324 }
4325 }
4326
4327 /* This function is called once per thread via for_each_thread.
4328 We look up which resume request applies to THREAD and mark it with a
4329 pointer to the appropriate resume request.
4330
4331 This algorithm is O(threads * resume elements), but resume elements
4332 is small (and will remain small at least until GDB supports thread
4333 suspension). */
4334
4335 static void
4336 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4337 {
4338 struct lwp_info *lwp = get_thread_lwp (thread);
4339
4340 for (int ndx = 0; ndx < n; ndx++)
4341 {
4342 ptid_t ptid = resume[ndx].thread;
4343 if (ptid == minus_one_ptid
4344 || ptid == thread->id
4345 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4346 of PID'. */
4347 || (ptid.pid () == pid_of (thread)
4348 && (ptid.is_pid ()
4349 || ptid.lwp () == -1)))
4350 {
4351 if (resume[ndx].kind == resume_stop
4352 && thread->last_resume_kind == resume_stop)
4353 {
4354 if (debug_threads)
4355 debug_printf ("already %s LWP %ld at GDB's request\n",
4356 (thread->last_status.kind
4357 == TARGET_WAITKIND_STOPPED)
4358 ? "stopped"
4359 : "stopping",
4360 lwpid_of (thread));
4361
4362 continue;
4363 }
4364
4365 /* Ignore (wildcard) resume requests for already-resumed
4366 threads. */
4367 if (resume[ndx].kind != resume_stop
4368 && thread->last_resume_kind != resume_stop)
4369 {
4370 if (debug_threads)
4371 debug_printf ("already %s LWP %ld at GDB's request\n",
4372 (thread->last_resume_kind
4373 == resume_step)
4374 ? "stepping"
4375 : "continuing",
4376 lwpid_of (thread));
4377 continue;
4378 }
4379
4380 /* Don't let wildcard resumes resume fork children that GDB
4381 does not yet know are new fork children. */
4382 if (lwp->fork_relative != NULL)
4383 {
4384 struct lwp_info *rel = lwp->fork_relative;
4385
4386 if (rel->status_pending_p
4387 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4388 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4389 {
4390 if (debug_threads)
4391 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4392 lwpid_of (thread));
4393 continue;
4394 }
4395 }
4396
4397 /* If the thread has a pending event that has already been
4398 reported to GDBserver core, but GDB has not pulled the
4399 event out of the vStopped queue yet, likewise, ignore the
4400 (wildcard) resume request. */
4401 if (in_queued_stop_replies (thread->id))
4402 {
4403 if (debug_threads)
4404 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4405 lwpid_of (thread));
4406 continue;
4407 }
4408
4409 lwp->resume = &resume[ndx];
4410 thread->last_resume_kind = lwp->resume->kind;
4411
4412 lwp->step_range_start = lwp->resume->step_range_start;
4413 lwp->step_range_end = lwp->resume->step_range_end;
4414
4415 /* If we had a deferred signal to report, dequeue one now.
4416 This can happen if LWP gets more than one signal while
4417 trying to get out of a jump pad. */
4418 if (lwp->stopped
4419 && !lwp->status_pending_p
4420 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4421 {
4422 lwp->status_pending_p = 1;
4423
4424 if (debug_threads)
4425 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4426 "leaving status pending.\n",
4427 WSTOPSIG (lwp->status_pending),
4428 lwpid_of (thread));
4429 }
4430
4431 return;
4432 }
4433 }
4434
4435 /* No resume action for this thread. */
4436 lwp->resume = NULL;
4437 }
4438
4439 bool
4440 linux_process_target::resume_status_pending (thread_info *thread)
4441 {
4442 struct lwp_info *lwp = get_thread_lwp (thread);
4443
4444 /* LWPs which will not be resumed are not interesting, because
4445 we might not wait for them next time through linux_wait. */
4446 if (lwp->resume == NULL)
4447 return false;
4448
4449 return thread_still_has_status_pending (thread);
4450 }
4451
4452 bool
4453 linux_process_target::thread_needs_step_over (thread_info *thread)
4454 {
4455 struct lwp_info *lwp = get_thread_lwp (thread);
4456 struct thread_info *saved_thread;
4457 CORE_ADDR pc;
4458 struct process_info *proc = get_thread_process (thread);
4459
4460 /* GDBserver is skipping the extra traps from the wrapper program;
4461 no step-over is needed in that case. */
4462 if (proc->tdesc == NULL)
4463 return false;
4464
4465 /* LWPs which will not be resumed are not interesting, because we
4466 might not wait for them next time through linux_wait. */
4467
4468 if (!lwp->stopped)
4469 {
4470 if (debug_threads)
4471 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4472 lwpid_of (thread));
4473 return false;
4474 }
4475
4476 if (thread->last_resume_kind == resume_stop)
4477 {
4478 if (debug_threads)
4479 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4480 " stopped\n",
4481 lwpid_of (thread));
4482 return false;
4483 }
4484
4485 gdb_assert (lwp->suspended >= 0);
4486
4487 if (lwp->suspended)
4488 {
4489 if (debug_threads)
4490 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4491 lwpid_of (thread));
4492 return false;
4493 }
4494
4495 if (lwp->status_pending_p)
4496 {
4497 if (debug_threads)
4498 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4499 " status.\n",
4500 lwpid_of (thread));
4501 return false;
4502 }
4503
4504 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4505 or we have. */
4506 pc = get_pc (lwp);
4507
4508 /* If the PC has changed since we stopped, then don't do anything,
4509 and let the breakpoint/tracepoint be hit. This happens if, for
4510 instance, GDB handled the decr_pc_after_break subtraction itself,
4511 GDB is OOL stepping this thread, or the user has issued a "jump"
4512 command, or poked the thread's registers herself. */
4513 if (pc != lwp->stop_pc)
4514 {
4515 if (debug_threads)
4516 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4517 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4518 lwpid_of (thread),
4519 paddress (lwp->stop_pc), paddress (pc));
4520 return false;
4521 }
4522
4523 /* On software single step target, resume the inferior with signal
4524 rather than stepping over. */
4525 if (supports_software_single_step ()
4526 && !lwp->pending_signals.empty ()
4527 && lwp_signal_can_be_delivered (lwp))
4528 {
4529 if (debug_threads)
4530 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4531 " signals.\n",
4532 lwpid_of (thread));
4533
4534 return false;
4535 }
4536
4537 saved_thread = current_thread;
4538 current_thread = thread;
4539
4540 /* We can only step over breakpoints we know about. */
4541 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4542 {
4543 /* Don't step over a breakpoint that GDB expects to hit
4544 though. If the condition is being evaluated on the target's side
4545 and it evaluates to false, step over this breakpoint as well. */
4546 if (gdb_breakpoint_here (pc)
4547 && gdb_condition_true_at_breakpoint (pc)
4548 && gdb_no_commands_at_breakpoint (pc))
4549 {
4550 if (debug_threads)
4551 debug_printf ("Need step over [LWP %ld]? yes, but found"
4552 " GDB breakpoint at 0x%s; skipping step over\n",
4553 lwpid_of (thread), paddress (pc));
4554
4555 current_thread = saved_thread;
4556 return false;
4557 }
4558 else
4559 {
4560 if (debug_threads)
4561 debug_printf ("Need step over [LWP %ld]? yes, "
4562 "found breakpoint at 0x%s\n",
4563 lwpid_of (thread), paddress (pc));
4564
4565 /* We've found an lwp that needs stepping over --- return true so
4566 that find_thread stops looking. */
4567 current_thread = saved_thread;
4568
4569 return true;
4570 }
4571 }
4572
4573 current_thread = saved_thread;
4574
4575 if (debug_threads)
4576 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4577 " at 0x%s\n",
4578 lwpid_of (thread), paddress (pc));
4579
4580 return false;
4581 }
4582
4583 void
4584 linux_process_target::start_step_over (lwp_info *lwp)
4585 {
4586 struct thread_info *thread = get_lwp_thread (lwp);
4587 struct thread_info *saved_thread;
4588 CORE_ADDR pc;
4589 int step;
4590
4591 if (debug_threads)
4592 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4593 lwpid_of (thread));
4594
4595 stop_all_lwps (1, lwp);
4596
4597 if (lwp->suspended != 0)
4598 {
4599 internal_error (__FILE__, __LINE__,
4600 "LWP %ld suspended=%d\n", lwpid_of (thread),
4601 lwp->suspended);
4602 }
4603
4604 if (debug_threads)
4605 debug_printf ("Done stopping all threads for step-over.\n");
4606
4607 /* Note, we should always reach here with an already adjusted PC,
4608 either by GDB (if we're resuming due to GDB's request), or by our
4609 caller, if we just finished handling an internal breakpoint GDB
4610 shouldn't care about. */
4611 pc = get_pc (lwp);
4612
4613 saved_thread = current_thread;
4614 current_thread = thread;
4615
4616 lwp->bp_reinsert = pc;
4617 uninsert_breakpoints_at (pc);
4618 uninsert_fast_tracepoint_jumps_at (pc);
4619
4620 step = single_step (lwp);
4621
4622 current_thread = saved_thread;
4623
4624 resume_one_lwp (lwp, step, 0, NULL);
4625
4626 /* Require next event from this LWP. */
4627 step_over_bkpt = thread->id;
4628 }
4629
4630 bool
4631 linux_process_target::finish_step_over (lwp_info *lwp)
4632 {
4633 if (lwp->bp_reinsert != 0)
4634 {
4635 struct thread_info *saved_thread = current_thread;
4636
4637 if (debug_threads)
4638 debug_printf ("Finished step over.\n");
4639
4640 current_thread = get_lwp_thread (lwp);
4641
4642 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4643 may be no breakpoint to reinsert there by now. */
4644 reinsert_breakpoints_at (lwp->bp_reinsert);
4645 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4646
4647 lwp->bp_reinsert = 0;
4648
4649 /* Delete any single-step breakpoints. No longer needed. We
4650 don't have to worry about other threads hitting this trap,
4651 and later not being able to explain it, because we were
4652 stepping over a breakpoint, and we hold all threads but
4653 LWP stopped while doing that. */
4654 if (!supports_hardware_single_step ())
4655 {
4656 gdb_assert (has_single_step_breakpoints (current_thread));
4657 delete_single_step_breakpoints (current_thread);
4658 }
4659
4660 step_over_bkpt = null_ptid;
4661 current_thread = saved_thread;
4662 return true;
4663 }
4664 else
4665 return false;
4666 }
4667
4668 void
4669 linux_process_target::complete_ongoing_step_over ()
4670 {
4671 if (step_over_bkpt != null_ptid)
4672 {
4673 struct lwp_info *lwp;
4674 int wstat;
4675 int ret;
4676
4677 if (debug_threads)
4678 debug_printf ("detach: step over in progress, finish it first\n");
4679
4680 /* Passing NULL_PTID as filter indicates we want all events to
4681 be left pending. Eventually this returns when there are no
4682 unwaited-for children left. */
4683 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4684 __WALL);
4685 gdb_assert (ret == -1);
4686
4687 lwp = find_lwp_pid (step_over_bkpt);
4688 if (lwp != NULL)
4689 {
4690 finish_step_over (lwp);
4691
4692 /* If we got our step SIGTRAP, don't leave it pending,
4693 otherwise we would report it to GDB as a spurious
4694 SIGTRAP. */
4695 gdb_assert (lwp->status_pending_p);
4696 if (WIFSTOPPED (lwp->status_pending)
4697 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4698 {
4699 thread_info *thread = get_lwp_thread (lwp);
4700 if (thread->last_resume_kind != resume_step)
4701 {
4702 if (debug_threads)
4703 debug_printf ("detach: discard step-over SIGTRAP\n");
4704
4705 lwp->status_pending_p = 0;
4706 lwp->status_pending = 0;
4707 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4708 }
4709 else
4710 {
4711 if (debug_threads)
4712 debug_printf ("detach: resume_step, "
4713 "not discarding step-over SIGTRAP\n");
4714 }
4715 }
4716 }
4717 step_over_bkpt = null_ptid;
4718 unsuspend_all_lwps (lwp);
4719 }
4720 }
4721
4722 void
4723 linux_process_target::resume_one_thread (thread_info *thread,
4724 bool leave_all_stopped)
4725 {
4726 struct lwp_info *lwp = get_thread_lwp (thread);
4727 int leave_pending;
4728
4729 if (lwp->resume == NULL)
4730 return;
4731
4732 if (lwp->resume->kind == resume_stop)
4733 {
4734 if (debug_threads)
4735 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4736
4737 if (!lwp->stopped)
4738 {
4739 if (debug_threads)
4740 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4741
4742 /* Stop the thread, and wait for the event asynchronously,
4743 through the event loop. */
4744 send_sigstop (lwp);
4745 }
4746 else
4747 {
4748 if (debug_threads)
4749 debug_printf ("already stopped LWP %ld\n",
4750 lwpid_of (thread));
4751
4752 /* The LWP may have been stopped in an internal event that
4753 was not meant to be notified back to GDB (e.g., gdbserver
4754 breakpoint), so we should be reporting a stop event in
4755 this case too. */
4756
4757 /* If the thread already has a pending SIGSTOP, this is a
4758 no-op. Otherwise, something later will presumably resume
4759 the thread and this will cause it to cancel any pending
4760 operation, due to last_resume_kind == resume_stop. If
4761 the thread already has a pending status to report, we
4762 will still report it the next time we wait - see
4763 status_pending_p_callback. */
4764
4765 /* If we already have a pending signal to report, then
4766 there's no need to queue a SIGSTOP, as this means we're
4767 midway through moving the LWP out of the jumppad, and we
4768 will report the pending signal as soon as that is
4769 finished. */
4770 if (lwp->pending_signals_to_report.empty ())
4771 send_sigstop (lwp);
4772 }
4773
4774 /* For stop requests, we're done. */
4775 lwp->resume = NULL;
4776 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4777 return;
4778 }
4779
4780 /* If this thread which is about to be resumed has a pending status,
4781 then don't resume it - we can just report the pending status.
4782 Likewise if it is suspended, because e.g., another thread is
4783 stepping past a breakpoint. Make sure to queue any signals that
4784 would otherwise be sent. In all-stop mode, we make this decision
4785 based on whether *any* thread has a pending status. If there's a
4786 thread that needs the step-over-breakpoint dance, then don't
4787 resume any other thread but that particular one. */
4788 leave_pending = (lwp->suspended
4789 || lwp->status_pending_p
4790 || leave_all_stopped);
4791
4792 /* If we have a new signal, enqueue the signal. */
4793 if (lwp->resume->sig != 0)
4794 {
4795 siginfo_t info, *info_p;
4796
4797 /* If this is the same signal we were previously stopped by,
4798 make sure to queue its siginfo. */
4799 if (WIFSTOPPED (lwp->last_status)
4800 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4801 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4802 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4803 info_p = &info;
4804 else
4805 info_p = NULL;
4806
4807 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4808 }
4809
4810 if (!leave_pending)
4811 {
4812 if (debug_threads)
4813 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4814
4815 proceed_one_lwp (thread, NULL);
4816 }
4817 else
4818 {
4819 if (debug_threads)
4820 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4821 }
4822
4823 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4824 lwp->resume = NULL;
4825 }
4826
4827 void
4828 linux_process_target::resume (thread_resume *resume_info, size_t n)
4829 {
4830 struct thread_info *need_step_over = NULL;
4831
4832 if (debug_threads)
4833 {
4834 debug_enter ();
4835 debug_printf ("linux_resume:\n");
4836 }
4837
4838 for_each_thread ([&] (thread_info *thread)
4839 {
4840 linux_set_resume_request (thread, resume_info, n);
4841 });
4842
4843 /* If there is a thread which would otherwise be resumed, which has
4844 a pending status, then don't resume any threads - we can just
4845 report the pending status. Make sure to queue any signals that
4846 would otherwise be sent. In non-stop mode, we'll apply this
4847 logic to each thread individually. We consume all pending events
4848 before considering to start a step-over (in all-stop). */
4849 bool any_pending = false;
4850 if (!non_stop)
4851 any_pending = find_thread ([this] (thread_info *thread)
4852 {
4853 return resume_status_pending (thread);
4854 }) != nullptr;
4855
4856 /* If there is a thread which would otherwise be resumed, which is
4857 stopped at a breakpoint that needs stepping over, then don't
4858 resume any threads - have it step over the breakpoint with all
4859 other threads stopped, then resume all threads again. Make sure
4860 to queue any signals that would otherwise be delivered or
4861 queued. */
4862 if (!any_pending && low_supports_breakpoints ())
4863 need_step_over = find_thread ([this] (thread_info *thread)
4864 {
4865 return thread_needs_step_over (thread);
4866 });
4867
4868 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4869
4870 if (debug_threads)
4871 {
4872 if (need_step_over != NULL)
4873 debug_printf ("Not resuming all, need step over\n");
4874 else if (any_pending)
4875 debug_printf ("Not resuming, all-stop and found "
4876 "an LWP with pending status\n");
4877 else
4878 debug_printf ("Resuming, no pending status or step over needed\n");
4879 }
4880
4881 /* Even if we're leaving threads stopped, queue all signals we'd
4882 otherwise deliver. */
4883 for_each_thread ([&] (thread_info *thread)
4884 {
4885 resume_one_thread (thread, leave_all_stopped);
4886 });
4887
4888 if (need_step_over)
4889 start_step_over (get_thread_lwp (need_step_over));
4890
4891 if (debug_threads)
4892 {
4893 debug_printf ("linux_resume done\n");
4894 debug_exit ();
4895 }
4896
4897 /* We may have events that were pending that can/should be sent to
4898 the client now. Trigger a linux_wait call. */
4899 if (target_is_async_p ())
4900 async_file_mark ();
4901 }
4902
4903 void
4904 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4905 {
4906 struct lwp_info *lwp = get_thread_lwp (thread);
4907 int step;
4908
4909 if (lwp == except)
4910 return;
4911
4912 if (debug_threads)
4913 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4914
4915 if (!lwp->stopped)
4916 {
4917 if (debug_threads)
4918 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4919 return;
4920 }
4921
4922 if (thread->last_resume_kind == resume_stop
4923 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4924 {
4925 if (debug_threads)
4926 debug_printf (" client wants LWP %ld to remain stopped\n",
4927 lwpid_of (thread));
4928 return;
4929 }
4930
4931 if (lwp->status_pending_p)
4932 {
4933 if (debug_threads)
4934 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4935 lwpid_of (thread));
4936 return;
4937 }
4938
4939 gdb_assert (lwp->suspended >= 0);
4940
4941 if (lwp->suspended)
4942 {
4943 if (debug_threads)
4944 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4945 return;
4946 }
4947
4948 if (thread->last_resume_kind == resume_stop
4949 && lwp->pending_signals_to_report.empty ()
4950 && (lwp->collecting_fast_tracepoint
4951 == fast_tpoint_collect_result::not_collecting))
4952 {
4953 /* We haven't reported this LWP as stopped yet (otherwise, the
4954 last_status.kind check above would catch it, and we wouldn't
4955 reach here). This LWP may have been momentarily paused by a
4956 stop_all_lwps call while handling, for example, another LWP's
4957 step-over. In that case, the pending expected SIGSTOP signal
4958 that was queued at vCont;t handling time will have already
4959 been consumed by wait_for_sigstop, and so we need to requeue
4960 another one here. Note that if the LWP already has a SIGSTOP
4961 pending, this is a no-op. */
4962
4963 if (debug_threads)
4964 debug_printf ("Client wants LWP %ld to stop. "
4965 "Making sure it has a SIGSTOP pending\n",
4966 lwpid_of (thread));
4967
4968 send_sigstop (lwp);
4969 }
4970
4971 if (thread->last_resume_kind == resume_step)
4972 {
4973 if (debug_threads)
4974 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4975 lwpid_of (thread));
4976
4977 /* If resume_step is requested by GDB, install single-step
4978 breakpoints when the thread is about to be actually resumed if
4979 the single-step breakpoints weren't removed. */
4980 if (supports_software_single_step ()
4981 && !has_single_step_breakpoints (thread))
4982 install_software_single_step_breakpoints (lwp);
4983
4984 step = maybe_hw_step (thread);
4985 }
4986 else if (lwp->bp_reinsert != 0)
4987 {
4988 if (debug_threads)
4989 debug_printf (" stepping LWP %ld, reinsert set\n",
4990 lwpid_of (thread));
4991
4992 step = maybe_hw_step (thread);
4993 }
4994 else
4995 step = 0;
4996
4997 resume_one_lwp (lwp, step, 0, NULL);
4998 }
4999
5000 void
5001 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5002 lwp_info *except)
5003 {
5004 struct lwp_info *lwp = get_thread_lwp (thread);
5005
5006 if (lwp == except)
5007 return;
5008
5009 lwp_suspended_decr (lwp);
5010
5011 proceed_one_lwp (thread, except);
5012 }
5013
5014 void
5015 linux_process_target::proceed_all_lwps ()
5016 {
5017 struct thread_info *need_step_over;
5018
5019 /* If there is a thread which would otherwise be resumed, which is
5020 stopped at a breakpoint that needs stepping over, then don't
5021 resume any threads - have it step over the breakpoint with all
5022 other threads stopped, then resume all threads again. */
5023
5024 if (low_supports_breakpoints ())
5025 {
5026 need_step_over = find_thread ([this] (thread_info *thread)
5027 {
5028 return thread_needs_step_over (thread);
5029 });
5030
5031 if (need_step_over != NULL)
5032 {
5033 if (debug_threads)
5034 debug_printf ("proceed_all_lwps: found "
5035 "thread %ld needing a step-over\n",
5036 lwpid_of (need_step_over));
5037
5038 start_step_over (get_thread_lwp (need_step_over));
5039 return;
5040 }
5041 }
5042
5043 if (debug_threads)
5044 debug_printf ("Proceeding, no step-over needed\n");
5045
5046 for_each_thread ([this] (thread_info *thread)
5047 {
5048 proceed_one_lwp (thread, NULL);
5049 });
5050 }
5051
5052 void
5053 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5054 {
5055 if (debug_threads)
5056 {
5057 debug_enter ();
5058 if (except)
5059 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5060 lwpid_of (get_lwp_thread (except)));
5061 else
5062 debug_printf ("unstopping all lwps\n");
5063 }
5064
5065 if (unsuspend)
5066 for_each_thread ([&] (thread_info *thread)
5067 {
5068 unsuspend_and_proceed_one_lwp (thread, except);
5069 });
5070 else
5071 for_each_thread ([&] (thread_info *thread)
5072 {
5073 proceed_one_lwp (thread, except);
5074 });
5075
5076 if (debug_threads)
5077 {
5078 debug_printf ("unstop_all_lwps done\n");
5079 debug_exit ();
5080 }
5081 }
5082
5083
5084 #ifdef HAVE_LINUX_REGSETS
5085
5086 #define use_linux_regsets 1
5087
5088 /* Returns true if REGSET has been disabled. */
5089
5090 static int
5091 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5092 {
5093 return (info->disabled_regsets != NULL
5094 && info->disabled_regsets[regset - info->regsets]);
5095 }
5096
5097 /* Disable REGSET. */
5098
5099 static void
5100 disable_regset (struct regsets_info *info, struct regset_info *regset)
5101 {
5102 int dr_offset;
5103
5104 dr_offset = regset - info->regsets;
5105 if (info->disabled_regsets == NULL)
5106 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5107 info->disabled_regsets[dr_offset] = 1;
5108 }
5109
5110 static int
5111 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5112 struct regcache *regcache)
5113 {
5114 struct regset_info *regset;
5115 int saw_general_regs = 0;
5116 int pid;
5117 struct iovec iov;
5118
5119 pid = lwpid_of (current_thread);
5120 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5121 {
5122 void *buf, *data;
5123 int nt_type, res;
5124
5125 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5126 continue;
5127
5128 buf = xmalloc (regset->size);
5129
5130 nt_type = regset->nt_type;
5131 if (nt_type)
5132 {
5133 iov.iov_base = buf;
5134 iov.iov_len = regset->size;
5135 data = (void *) &iov;
5136 }
5137 else
5138 data = buf;
5139
5140 #ifndef __sparc__
5141 res = ptrace (regset->get_request, pid,
5142 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5143 #else
5144 res = ptrace (regset->get_request, pid, data, nt_type);
5145 #endif
5146 if (res < 0)
5147 {
5148 if (errno == EIO
5149 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5150 {
5151 /* If we get EIO on a regset, or an EINVAL and the regset is
5152 optional, do not try it again for this process mode. */
5153 disable_regset (regsets_info, regset);
5154 }
5155 else if (errno == ENODATA)
5156 {
5157 /* ENODATA may be returned if the regset is currently
5158 not "active". This can happen in normal operation,
5159 so suppress the warning in this case. */
5160 }
5161 else if (errno == ESRCH)
5162 {
5163 /* At this point, ESRCH should mean the process is
5164 already gone, in which case we simply ignore attempts
5165 to read its registers. */
5166 }
5167 else
5168 {
5169 char s[256];
5170 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5171 pid);
5172 perror (s);
5173 }
5174 }
5175 else
5176 {
5177 if (regset->type == GENERAL_REGS)
5178 saw_general_regs = 1;
5179 regset->store_function (regcache, buf);
5180 }
5181 free (buf);
5182 }
5183 if (saw_general_regs)
5184 return 0;
5185 else
5186 return 1;
5187 }
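
/* For illustration: a minimal sketch of the iovec-based regset fetch
   performed above, for the common NT_PRSTATUS (general registers)
   case.  Assumes a Linux kernel with PTRACE_GETREGSET (2.6.34+) and an
   architecture providing struct user_regs_struct; not compiled.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static long
fetch_gregs (pid_t tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* The kernel fills the buffer and shrinks iov.iov_len to the number
     of bytes it actually wrote.  */
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif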
5188
5189 static int
5190 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5191 struct regcache *regcache)
5192 {
5193 struct regset_info *regset;
5194 int saw_general_regs = 0;
5195 int pid;
5196 struct iovec iov;
5197
5198 pid = lwpid_of (current_thread);
5199 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5200 {
5201 void *buf, *data;
5202 int nt_type, res;
5203
5204 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5205 || regset->fill_function == NULL)
5206 continue;
5207
5208 buf = xmalloc (regset->size);
5209
5210 /* First fill the buffer with the current register set contents,
5211 in case there are any items in the kernel's regset that are
5212 not in gdbserver's regcache. */
5213
5214 nt_type = regset->nt_type;
5215 if (nt_type)
5216 {
5217 iov.iov_base = buf;
5218 iov.iov_len = regset->size;
5219 data = (void *) &iov;
5220 }
5221 else
5222 data = buf;
5223
5224 #ifndef __sparc__
5225 res = ptrace (regset->get_request, pid,
5226 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5227 #else
5228 res = ptrace (regset->get_request, pid, data, nt_type);
5229 #endif
5230
5231 if (res == 0)
5232 {
5233 /* Then overlay our cached registers on that. */
5234 regset->fill_function (regcache, buf);
5235
5236 /* Only now do we write the register set. */
5237 #ifndef __sparc__
5238 res = ptrace (regset->set_request, pid,
5239 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5240 #else
5241 res = ptrace (regset->set_request, pid, data, nt_type);
5242 #endif
5243 }
5244
5245 if (res < 0)
5246 {
5247 if (errno == EIO
5248 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5249 {
5250 /* If we get EIO on a regset, or an EINVAL and the regset is
5251 optional, do not try it again for this process mode. */
5252 disable_regset (regsets_info, regset);
5253 }
5254 else if (errno == ESRCH)
5255 {
5256 /* At this point, ESRCH should mean the process is
5257 already gone, in which case we simply ignore attempts
5258 to change its registers. See also the related
5259 comment in resume_one_lwp. */
5260 free (buf);
5261 return 0;
5262 }
5263 else
5264 {
5265 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5266 }
5267 }
5268 else if (regset->type == GENERAL_REGS)
5269 saw_general_regs = 1;
5270 free (buf);
5271 }
5272 if (saw_general_regs)
5273 return 0;
5274 else
5275 return 1;
5276 }
5277
5278 #else /* !HAVE_LINUX_REGSETS */
5279
5280 #define use_linux_regsets 0
5281 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5282 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5283
5284 #endif
5285
5286 /* Return 1 if register REGNO is supported by one of the regset ptrace
5287 calls or 0 if it has to be transferred individually. */
5288
5289 static int
5290 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5291 {
5292 unsigned char mask = 1 << (regno % 8);
5293 size_t index = regno / 8;
5294
5295 return (use_linux_regsets
5296 && (regs_info->regset_bitmap == NULL
5297 || (regs_info->regset_bitmap[index] & mask) != 0));
5298 }
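
/* A worked example of the bitmap test above: for regno = 10, index is
   10 / 8 = 1 and mask is 1 << (10 % 8) = 0x04, so register 10 is
   handled by the regsets iff bit 2 of regset_bitmap[1] is set.  A NULL
   bitmap means every register is covered.  */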
5299
5300 #ifdef HAVE_LINUX_USRREGS
5301
5302 static int
5303 register_addr (const struct usrregs_info *usrregs, int regnum)
5304 {
5305 int addr;
5306
5307 if (regnum < 0 || regnum >= usrregs->num_regs)
5308 error ("Invalid register number %d.", regnum);
5309
5310 addr = usrregs->regmap[regnum];
5311
5312 return addr;
5313 }
5314
5315
5316 void
5317 linux_process_target::fetch_register (const usrregs_info *usrregs,
5318 regcache *regcache, int regno)
5319 {
5320 CORE_ADDR regaddr;
5321 int i, size;
5322 char *buf;
5323 int pid;
5324
5325 if (regno >= usrregs->num_regs)
5326 return;
5327 if (low_cannot_fetch_register (regno))
5328 return;
5329
5330 regaddr = register_addr (usrregs, regno);
5331 if (regaddr == -1)
5332 return;
5333
5334 size = ((register_size (regcache->tdesc, regno)
5335 + sizeof (PTRACE_XFER_TYPE) - 1)
5336 & -sizeof (PTRACE_XFER_TYPE));
5337 buf = (char *) alloca (size);
5338
5339 pid = lwpid_of (current_thread);
5340 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5341 {
5342 errno = 0;
5343 *(PTRACE_XFER_TYPE *) (buf + i) =
5344 ptrace (PTRACE_PEEKUSER, pid,
5345 /* Coerce to a uintptr_t first to avoid potential gcc warning
5346 of coercing an 8 byte integer to a 4 byte pointer. */
5347 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5348 regaddr += sizeof (PTRACE_XFER_TYPE);
5349 if (errno != 0)
5350 {
5351 /* Mark register REGNO unavailable. */
5352 supply_register (regcache, regno, NULL);
5353 return;
5354 }
5355 }
5356
5357 low_supply_ptrace_register (regcache, regno, buf);
5358 }
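
/* A worked example of the size computation above: it rounds the
   register size up to a whole number of ptrace transfer words.  With
   an 8-byte PTRACE_XFER_TYPE and a 10-byte register (e.g. an x87
   floating-point register), size = (10 + 7) & -8 = 16, so the loop
   issues two PTRACE_PEEKUSER calls.  */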
5359
5360 void
5361 linux_process_target::store_register (const usrregs_info *usrregs,
5362 regcache *regcache, int regno)
5363 {
5364 CORE_ADDR regaddr;
5365 int i, size;
5366 char *buf;
5367 int pid;
5368
5369 if (regno >= usrregs->num_regs)
5370 return;
5371 if (low_cannot_store_register (regno))
5372 return;
5373
5374 regaddr = register_addr (usrregs, regno);
5375 if (regaddr == -1)
5376 return;
5377
5378 size = ((register_size (regcache->tdesc, regno)
5379 + sizeof (PTRACE_XFER_TYPE) - 1)
5380 & -sizeof (PTRACE_XFER_TYPE));
5381 buf = (char *) alloca (size);
5382 memset (buf, 0, size);
5383
5384 low_collect_ptrace_register (regcache, regno, buf);
5385
5386 pid = lwpid_of (current_thread);
5387 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5388 {
5389 errno = 0;
5390 ptrace (PTRACE_POKEUSER, pid,
5391 /* Coerce to a uintptr_t first to avoid potential gcc warning
5392 about coercing an 8 byte integer to a 4 byte pointer. */
5393 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5394 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5395 if (errno != 0)
5396 {
5397 /* At this point, ESRCH should mean the process is
5398 already gone, in which case we simply ignore attempts
5399 to change its registers. See also the related
5400 comment in resume_one_lwp. */
5401 if (errno == ESRCH)
5402 return;
5403
5405 if (!low_cannot_store_register (regno))
5406 error ("writing register %d: %s", regno, safe_strerror (errno));
5407 }
5408 regaddr += sizeof (PTRACE_XFER_TYPE);
5409 }
5410 }
5411 #endif /* HAVE_LINUX_USRREGS */
5412
5413 void
5414 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5415 int regno, char *buf)
5416 {
5417 collect_register (regcache, regno, buf);
5418 }
5419
5420 void
5421 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5422 int regno, const char *buf)
5423 {
5424 supply_register (regcache, regno, buf);
5425 }
5426
5427 void
5428 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5429 regcache *regcache,
5430 int regno, int all)
5431 {
5432 #ifdef HAVE_LINUX_USRREGS
5433 struct usrregs_info *usr = regs_info->usrregs;
5434
5435 if (regno == -1)
5436 {
5437 for (regno = 0; regno < usr->num_regs; regno++)
5438 if (all || !linux_register_in_regsets (regs_info, regno))
5439 fetch_register (usr, regcache, regno);
5440 }
5441 else
5442 fetch_register (usr, regcache, regno);
5443 #endif
5444 }
5445
5446 void
5447 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5448 regcache *regcache,
5449 int regno, int all)
5450 {
5451 #ifdef HAVE_LINUX_USRREGS
5452 struct usrregs_info *usr = regs_info->usrregs;
5453
5454 if (regno == -1)
5455 {
5456 for (regno = 0; regno < usr->num_regs; regno++)
5457 if (all || !linux_register_in_regsets (regs_info, regno))
5458 store_register (usr, regcache, regno);
5459 }
5460 else
5461 store_register (usr, regcache, regno);
5462 #endif
5463 }
5464
5465 void
5466 linux_process_target::fetch_registers (regcache *regcache, int regno)
5467 {
5468 int use_regsets;
5469 int all = 0;
5470 const regs_info *regs_info = get_regs_info ();
5471
5472 if (regno == -1)
5473 {
5474 if (regs_info->usrregs != NULL)
5475 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5476 low_fetch_register (regcache, regno);
5477
5478 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5479 if (regs_info->usrregs != NULL)
5480 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5481 }
5482 else
5483 {
5484 if (low_fetch_register (regcache, regno))
5485 return;
5486
5487 use_regsets = linux_register_in_regsets (regs_info, regno);
5488 if (use_regsets)
5489 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5490 regcache);
5491 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5492 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5493 }
5494 }
5495
5496 void
5497 linux_process_target::store_registers (regcache *regcache, int regno)
5498 {
5499 int use_regsets;
5500 int all = 0;
5501 const regs_info *regs_info = get_regs_info ();
5502
5503 if (regno == -1)
5504 {
5505 all = regsets_store_inferior_registers (regs_info->regsets_info,
5506 regcache);
5507 if (regs_info->usrregs != NULL)
5508 usr_store_inferior_registers (regs_info, regcache, regno, all);
5509 }
5510 else
5511 {
5512 use_regsets = linux_register_in_regsets (regs_info, regno);
5513 if (use_regsets)
5514 all = regsets_store_inferior_registers (regs_info->regsets_info,
5515 regcache);
5516 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5517 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5518 }
5519 }
5520
5521 bool
5522 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5523 {
5524 return false;
5525 }
5526
5527 /* A wrapper for the read_memory target op. */
5528
5529 static int
5530 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5531 {
5532 return the_target->read_memory (memaddr, myaddr, len);
5533 }
5534
5535 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5536 to debugger memory starting at MYADDR. */
5537
5538 int
5539 linux_process_target::read_memory (CORE_ADDR memaddr,
5540 unsigned char *myaddr, int len)
5541 {
5542 int pid = lwpid_of (current_thread);
5543 PTRACE_XFER_TYPE *buffer;
5544 CORE_ADDR addr;
5545 int count;
5546 char filename[64];
5547 int i;
5548 int ret;
5549 int fd;
5550
5551 /* Try using /proc. Don't bother for one word. */
5552 if (len >= 3 * sizeof (long))
5553 {
5554 int bytes;
5555
5556 /* We could keep this file open and cache it - possibly one per
5557 thread. That requires some juggling, but is even faster. */
5558 sprintf (filename, "/proc/%d/mem", pid);
5559 fd = open (filename, O_RDONLY | O_LARGEFILE);
5560 if (fd == -1)
5561 goto no_proc;
5562
5563 /* If pread64 is available, use it. It's faster if the kernel
5564 supports it (only one syscall), and it's 64-bit safe even on
5565 32-bit platforms (for instance, SPARC debugging a SPARC64
5566 application). */
5567 #ifdef HAVE_PREAD64
5568 bytes = pread64 (fd, myaddr, len, memaddr);
5569 #else
5570 bytes = -1;
5571 if (lseek (fd, memaddr, SEEK_SET) != -1)
5572 bytes = read (fd, myaddr, len);
5573 #endif
5574
5575 close (fd);
5576 if (bytes == len)
5577 return 0;
5578
5579 /* Some data was read, we'll try to get the rest with ptrace. */
5580 if (bytes > 0)
5581 {
5582 memaddr += bytes;
5583 myaddr += bytes;
5584 len -= bytes;
5585 }
5586 }
5587
5588 no_proc:
5589 /* Round starting address down to longword boundary. */
5590 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5591 /* Round ending address up; get number of longwords that makes. */
5592 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5593 / sizeof (PTRACE_XFER_TYPE));
5594 /* Allocate buffer of that many longwords. */
5595 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5596
5597 /* Read all the longwords. */
5598 errno = 0;
5599 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5600 {
5601 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5602 about coercing an 8 byte integer to a 4 byte pointer. */
5603 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5604 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5605 (PTRACE_TYPE_ARG4) 0);
5606 if (errno)
5607 break;
5608 }
5609 ret = errno;
5610
5611 /* Copy appropriate bytes out of the buffer. */
5612 if (i > 0)
5613 {
5614 i *= sizeof (PTRACE_XFER_TYPE);
5615 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5616 memcpy (myaddr,
5617 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5618 i < len ? i : len);
5619 }
5620
5621 return ret;
5622 }
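
/* For illustration: a condensed sketch of the fast path above, reading
   inferior memory through /proc/PID/mem with pread64 so that a single
   syscall transfers the whole range instead of one PTRACE_PEEKTEXT per
   word.  Hypothetical helper; assumes pread64/off64_t are available
   (e.g. _GNU_SOURCE on glibc); not compiled.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY | O_LARGEFILE);
  if (fd < 0)
    return -1;
  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}
#endif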
5623
5624 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5625 memory at MEMADDR. On failure (cannot write to the inferior)
5626 returns the value of errno. Always succeeds if LEN is zero. */
5627
5628 int
5629 linux_process_target::write_memory (CORE_ADDR memaddr,
5630 const unsigned char *myaddr, int len)
5631 {
5632 int i;
5633 /* Round starting address down to longword boundary. */
5634 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5635 /* Round ending address up; get number of longwords that makes. */
5636 int count
5637 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5638 / sizeof (PTRACE_XFER_TYPE);
5639
5640 /* Allocate buffer of that many longwords. */
5641 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5642
5643 int pid = lwpid_of (current_thread);
5644
5645 if (len == 0)
5646 {
5647 /* Zero length write always succeeds. */
5648 return 0;
5649 }
5650
5651 if (debug_threads)
5652 {
5653 /* Dump up to four bytes. */
5654 char str[4 * 2 + 1];
5655 char *p = str;
5656 int dump = len < 4 ? len : 4;
5657
5658 for (i = 0; i < dump; i++)
5659 {
5660 sprintf (p, "%02x", myaddr[i]);
5661 p += 2;
5662 }
5663 *p = '\0';
5664
5665 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5666 str, (long) memaddr, pid);
5667 }
5668
5669 /* Fill start and end extra bytes of buffer with existing memory data. */
5670
5671 errno = 0;
5672 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5673 about coercing an 8 byte integer to a 4 byte pointer. */
5674 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5675 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5676 (PTRACE_TYPE_ARG4) 0);
5677 if (errno)
5678 return errno;
5679
5680 if (count > 1)
5681 {
5682 errno = 0;
5683 buffer[count - 1]
5684 = ptrace (PTRACE_PEEKTEXT, pid,
5685 /* Coerce to a uintptr_t first to avoid potential gcc warning
5686 about coercing an 8 byte integer to a 4 byte pointer. */
5687 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5688 * sizeof (PTRACE_XFER_TYPE)),
5689 (PTRACE_TYPE_ARG4) 0);
5690 if (errno)
5691 return errno;
5692 }
5693
5694 /* Copy data to be written over corresponding part of buffer. */
5695
5696 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5697 myaddr, len);
5698
5699 /* Write the entire buffer. */
5700
5701 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5702 {
5703 errno = 0;
5704 ptrace (PTRACE_POKETEXT, pid,
5705 /* Coerce to a uintptr_t first to avoid potential gcc warning
5706 about coercing an 8 byte integer to a 4 byte pointer. */
5707 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5708 (PTRACE_TYPE_ARG4) buffer[i]);
5709 if (errno)
5710 return errno;
5711 }
5712
5713 return 0;
5714 }
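/* Editorial sketch (illustrative, not gdbserver API): the word-aligned
   read-modify-write at the heart of write_memory, reduced to a single
   byte.  ptrace transfers whole PTRACE_XFER_TYPE words at aligned
   addresses, so a partial-word write must peek the existing word,
   splice in the new bytes, and poke the result back.  */

#if 0 /* illustrative only */
static int
example_poke_byte (int pid, CORE_ADDR memaddr, unsigned char value)
{
  const CORE_ADDR mask = sizeof (PTRACE_XFER_TYPE) - 1;
  CORE_ADDR addr = memaddr & ~mask;	/* Aligned word address.  */

  errno = 0;
  PTRACE_XFER_TYPE word = ptrace (PTRACE_PEEKTEXT, pid,
				  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
				  (PTRACE_TYPE_ARG4) 0);
  if (errno != 0)
    return errno;

  /* Splice the new byte into the word just read.  */
  memcpy ((char *) &word + (memaddr & mask), &value, 1);

  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	  (PTRACE_TYPE_ARG4) word);
  return errno;
}
#endif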
5715
5716 void
5717 linux_process_target::look_up_symbols ()
5718 {
5719 #ifdef USE_THREAD_DB
5720 struct process_info *proc = current_process ();
5721
5722 if (proc->priv->thread_db != NULL)
5723 return;
5724
5725 thread_db_init ();
5726 #endif
5727 }
5728
5729 void
5730 linux_process_target::request_interrupt ()
5731 {
5732 /* Send a SIGINT to the process group. This acts just like the user
5733 typed a ^C on the controlling terminal. */
5734 ::kill (-signal_pid, SIGINT);
5735 }
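/* Editorial note: kill with a negative pid targets the process group
   whose id is its absolute value, so the call above delivers SIGINT to
   every process in the inferior's group, just as the terminal driver
   does for ^C:

     ::kill (-signal_pid, SIGINT);	// signal_pid is the group leader
*/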
5736
5737 bool
5738 linux_process_target::supports_read_auxv ()
5739 {
5740 return true;
5741 }
5742
5743 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5744 to debugger memory starting at MYADDR. */
5745
5746 int
5747 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5748 unsigned int len)
5749 {
5750 char filename[PATH_MAX];
5751 int fd, n;
5752 int pid = lwpid_of (current_thread);
5753
5754 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5755
5756 fd = open (filename, O_RDONLY);
5757 if (fd < 0)
5758 return -1;
5759
5760 if (offset != (CORE_ADDR) 0
5761 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5762 n = -1;
5763 else
5764 n = read (fd, myaddr, len);
5765
5766 close (fd);
5767
5768 return n;
5769 }
5770
5771 int
5772 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5773 int size, raw_breakpoint *bp)
5774 {
5775 if (type == raw_bkpt_type_sw)
5776 return insert_memory_breakpoint (bp);
5777 else
5778 return low_insert_point (type, addr, size, bp);
5779 }
5780
5781 int
5782 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5783 int size, raw_breakpoint *bp)
5784 {
5785 /* Unsupported (see target.h). */
5786 return 1;
5787 }
5788
5789 int
5790 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5791 int size, raw_breakpoint *bp)
5792 {
5793 if (type == raw_bkpt_type_sw)
5794 return remove_memory_breakpoint (bp);
5795 else
5796 return low_remove_point (type, addr, size, bp);
5797 }
5798
5799 int
5800 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5801 int size, raw_breakpoint *bp)
5802 {
5803 /* Unsupported (see target.h). */
5804 return 1;
5805 }
5806
5807 /* Implement the stopped_by_sw_breakpoint target_ops
5808 method. */
5809
5810 bool
5811 linux_process_target::stopped_by_sw_breakpoint ()
5812 {
5813 struct lwp_info *lwp = get_thread_lwp (current_thread);
5814
5815 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5816 }
5817
5818 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5819 method. */
5820
5821 bool
5822 linux_process_target::supports_stopped_by_sw_breakpoint ()
5823 {
5824 return USE_SIGTRAP_SIGINFO;
5825 }
5826
5827 /* Implement the stopped_by_hw_breakpoint target_ops
5828 method. */
5829
5830 bool
5831 linux_process_target::stopped_by_hw_breakpoint ()
5832 {
5833 struct lwp_info *lwp = get_thread_lwp (current_thread);
5834
5835 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5836 }
5837
5838 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5839 method. */
5840
5841 bool
5842 linux_process_target::supports_stopped_by_hw_breakpoint ()
5843 {
5844 return USE_SIGTRAP_SIGINFO;
5845 }
5846
5847 /* Implement the supports_hardware_single_step target_ops method. */
5848
5849 bool
5850 linux_process_target::supports_hardware_single_step ()
5851 {
5852 return true;
5853 }
5854
5855 bool
5856 linux_process_target::stopped_by_watchpoint ()
5857 {
5858 struct lwp_info *lwp = get_thread_lwp (current_thread);
5859
5860 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5861 }
5862
5863 CORE_ADDR
5864 linux_process_target::stopped_data_address ()
5865 {
5866 struct lwp_info *lwp = get_thread_lwp (current_thread);
5867
5868 return lwp->stopped_data_address;
5869 }
5870
5871 /* This is only used for targets that define PT_TEXT_ADDR,
5872 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5873 target presumably has other ways of acquiring this information,
5874 such as loadmaps. */
5875
5876 bool
5877 linux_process_target::supports_read_offsets ()
5878 {
5879 #ifdef SUPPORTS_READ_OFFSETS
5880 return true;
5881 #else
5882 return false;
5883 #endif
5884 }
5885
5886 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5887 to tell gdb about. */
5888
5889 int
5890 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5891 {
5892 #ifdef SUPPORTS_READ_OFFSETS
5893 unsigned long text, text_end, data;
5894 int pid = lwpid_of (current_thread);
5895
5896 errno = 0;
5897
5898 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5899 (PTRACE_TYPE_ARG4) 0);
5900 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5901 (PTRACE_TYPE_ARG4) 0);
5902 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5903 (PTRACE_TYPE_ARG4) 0);
5904
5905 if (errno == 0)
5906 {
5907 /* Both text and data offsets produced at compile-time (and so
5908 used by gdb) are relative to the beginning of the program,
5909 with the data segment immediately following the text segment.
5910 However, the actual runtime layout in memory may put the data
5911 somewhere else, so when we send gdb a data base-address, we
5912 use the real data base address and subtract the compile-time
5913 data base-address from it (which is just the length of the
5914 text segment). BSS immediately follows data in both
5915 cases. */
5916 *text_p = text;
5917 *data_p = data - (text_end - text);
5918
5919 return 1;
5920 }
5921 return 0;
5922 #else
5923 gdb_assert_not_reached ("target op read_offsets not supported");
5924 #endif
5925 }
5926
5927 bool
5928 linux_process_target::supports_get_tls_address ()
5929 {
5930 #ifdef USE_THREAD_DB
5931 return true;
5932 #else
5933 return false;
5934 #endif
5935 }
5936
5937 int
5938 linux_process_target::get_tls_address (thread_info *thread,
5939 CORE_ADDR offset,
5940 CORE_ADDR load_module,
5941 CORE_ADDR *address)
5942 {
5943 #ifdef USE_THREAD_DB
5944 return thread_db_get_tls_address (thread, offset, load_module, address);
5945 #else
5946 return -1;
5947 #endif
5948 }
5949
5950 bool
5951 linux_process_target::supports_qxfer_osdata ()
5952 {
5953 return true;
5954 }
5955
5956 int
5957 linux_process_target::qxfer_osdata (const char *annex,
5958 unsigned char *readbuf,
5959 unsigned const char *writebuf,
5960 CORE_ADDR offset, int len)
5961 {
5962 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5963 }
5964
5965 void
5966 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5967 gdb_byte *inf_siginfo, int direction)
5968 {
5969 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5970
5971 /* If there was no callback, or the callback didn't do anything,
5972 then just do a straight memcpy. */
5973 if (!done)
5974 {
5975 if (direction == 1)
5976 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5977 else
5978 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5979 }
5980 }
5981
5982 bool
5983 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5984 int direction)
5985 {
5986 return false;
5987 }
5988
5989 bool
5990 linux_process_target::supports_qxfer_siginfo ()
5991 {
5992 return true;
5993 }
5994
5995 int
5996 linux_process_target::qxfer_siginfo (const char *annex,
5997 unsigned char *readbuf,
5998 unsigned const char *writebuf,
5999 CORE_ADDR offset, int len)
6000 {
6001 int pid;
6002 siginfo_t siginfo;
6003 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6004
6005 if (current_thread == NULL)
6006 return -1;
6007
6008 pid = lwpid_of (current_thread);
6009
6010 if (debug_threads)
6011 debug_printf ("%s siginfo for lwp %d.\n",
6012 readbuf != NULL ? "Reading" : "Writing",
6013 pid);
6014
6015 if (offset >= sizeof (siginfo))
6016 return -1;
6017
6018 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6019 return -1;
6020
6021 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6022 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6023 inferior with a 64-bit GDBSERVER should look the same as debugging it
6024 with a 32-bit GDBSERVER, we need to convert it. */
6025 siginfo_fixup (&siginfo, inf_siginfo, 0);
6026
6027 if (offset + len > sizeof (siginfo))
6028 len = sizeof (siginfo) - offset;
6029
6030 if (readbuf != NULL)
6031 memcpy (readbuf, inf_siginfo + offset, len);
6032 else
6033 {
6034 memcpy (inf_siginfo + offset, writebuf, len);
6035
6036 /* Convert back to ptrace layout before flushing it out. */
6037 siginfo_fixup (&siginfo, inf_siginfo, 1);
6038
6039 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6040 return -1;
6041 }
6042
6043 return len;
6044 }
6045
6046 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6047 it lets us notice when children change state; elsewhere, it is the
6048 handler for the sigsuspend in my_waitpid. */
6049
6050 static void
6051 sigchld_handler (int signo)
6052 {
6053 int old_errno = errno;
6054
6055 if (debug_threads)
6056 {
6057 do
6058 {
6059 /* Use the async signal safe debug function. */
6060 if (debug_write ("sigchld_handler\n",
6061 sizeof ("sigchld_handler\n") - 1) < 0)
6062 break; /* just ignore */
6063 } while (0);
6064 }
6065
6066 if (target_is_async_p ())
6067 async_file_mark (); /* trigger a linux_wait */
6068
6069 errno = old_errno;
6070 }
6071
6072 bool
6073 linux_process_target::supports_non_stop ()
6074 {
6075 return true;
6076 }
6077
6078 bool
6079 linux_process_target::async (bool enable)
6080 {
6081 bool previous = target_is_async_p ();
6082
6083 if (debug_threads)
6084 debug_printf ("linux_async (%d), previous=%d\n",
6085 enable, previous);
6086
6087 if (previous != enable)
6088 {
6089 sigset_t mask;
6090 sigemptyset (&mask);
6091 sigaddset (&mask, SIGCHLD);
6092
6093 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6094
6095 if (enable)
6096 {
6097 if (pipe (linux_event_pipe) == -1)
6098 {
6099 linux_event_pipe[0] = -1;
6100 linux_event_pipe[1] = -1;
6101 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6102
6103 warning ("creating event pipe failed.");
6104 return previous;
6105 }
6106
6107 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6108 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6109
6110 /* Register the event loop handler. */
6111 add_file_handler (linux_event_pipe[0],
6112 handle_target_event, NULL,
6113 "linux-low");
6114
6115 /* Always trigger a linux_wait. */
6116 async_file_mark ();
6117 }
6118 else
6119 {
6120 delete_file_handler (linux_event_pipe[0]);
6121
6122 close (linux_event_pipe[0]);
6123 close (linux_event_pipe[1]);
6124 linux_event_pipe[0] = -1;
6125 linux_event_pipe[1] = -1;
6126 }
6127
6128 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6129 }
6130
6131 return previous;
6132 }
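/* Editorial sketch (illustrative, not gdbserver API): the self-pipe
   trick used above, in miniature.  A signal handler may only call
   async-signal-safe functions, so instead of doing real work it writes
   one byte to a non-blocking pipe; the event loop watches the read end
   and does the work in normal context.  */

#if 0 /* illustrative only */
static int example_event_pipe[2] = { -1, -1 };

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;

  /* write is async-signal-safe; with O_NONBLOCK a full pipe simply
     drops this (redundant) wakeup.  */
  if (write (example_event_pipe[1], "+", 1) < 0)
    ; /* Ignore: a wakeup is already pending.  */

  errno = saved_errno;
}

static int
example_async_setup (void)
{
  if (pipe (example_event_pipe) == -1)
    return -1;

  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif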
6133
6134 int
6135 linux_process_target::start_non_stop (bool nonstop)
6136 {
6137 /* Register or unregister from event-loop accordingly. */
6138 target_async (nonstop);
6139
6140 if (target_is_async_p () != (nonstop != false))
6141 return -1;
6142
6143 return 0;
6144 }
6145
6146 bool
6147 linux_process_target::supports_multi_process ()
6148 {
6149 return true;
6150 }
6151
6152 /* Check if fork events are supported. */
6153
6154 bool
6155 linux_process_target::supports_fork_events ()
6156 {
6157 return linux_supports_tracefork ();
6158 }
6159
6160 /* Check if vfork events are supported. */
6161
6162 bool
6163 linux_process_target::supports_vfork_events ()
6164 {
6165 return linux_supports_tracefork ();
6166 }
6167
6168 /* Check if exec events are supported. */
6169
6170 bool
6171 linux_process_target::supports_exec_events ()
6172 {
6173 return linux_supports_traceexec ();
6174 }
6175
6176 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6177 ptrace flags for all inferiors. This is in case the new GDB connection
6178 doesn't support the same set of events that the previous one did. */
6179
6180 void
6181 linux_process_target::handle_new_gdb_connection ()
6182 {
6183 /* Request that all the lwps reset their ptrace options. */
6184 for_each_thread ([] (thread_info *thread)
6185 {
6186 struct lwp_info *lwp = get_thread_lwp (thread);
6187
6188 if (!lwp->stopped)
6189 {
6190 /* Stop the lwp so we can modify its ptrace options. */
6191 lwp->must_set_ptrace_flags = 1;
6192 linux_stop_lwp (lwp);
6193 }
6194 else
6195 {
6196 /* Already stopped; go ahead and set the ptrace options. */
6197 struct process_info *proc = find_process_pid (pid_of (thread));
6198 int options = linux_low_ptrace_options (proc->attached);
6199
6200 linux_enable_event_reporting (lwpid_of (thread), options);
6201 lwp->must_set_ptrace_flags = 0;
6202 }
6203 });
6204 }
6205
6206 int
6207 linux_process_target::handle_monitor_command (char *mon)
6208 {
6209 #ifdef USE_THREAD_DB
6210 return thread_db_handle_monitor_command (mon);
6211 #else
6212 return 0;
6213 #endif
6214 }
6215
6216 int
6217 linux_process_target::core_of_thread (ptid_t ptid)
6218 {
6219 return linux_common_core_of_thread (ptid);
6220 }
6221
6222 bool
6223 linux_process_target::supports_disable_randomization ()
6224 {
6225 return true;
6226 }
6227
6228 bool
6229 linux_process_target::supports_agent ()
6230 {
6231 return true;
6232 }
6233
6234 bool
6235 linux_process_target::supports_range_stepping ()
6236 {
6237 if (supports_software_single_step ())
6238 return true;
6239
6240 return low_supports_range_stepping ();
6241 }
6242
6243 bool
6244 linux_process_target::low_supports_range_stepping ()
6245 {
6246 return false;
6247 }
6248
6249 bool
6250 linux_process_target::supports_pid_to_exec_file ()
6251 {
6252 return true;
6253 }
6254
6255 const char *
6256 linux_process_target::pid_to_exec_file (int pid)
6257 {
6258 return linux_proc_pid_to_exec_file (pid);
6259 }
6260
6261 bool
6262 linux_process_target::supports_multifs ()
6263 {
6264 return true;
6265 }
6266
6267 int
6268 linux_process_target::multifs_open (int pid, const char *filename,
6269 int flags, mode_t mode)
6270 {
6271 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6272 }
6273
6274 int
6275 linux_process_target::multifs_unlink (int pid, const char *filename)
6276 {
6277 return linux_mntns_unlink (pid, filename);
6278 }
6279
6280 ssize_t
6281 linux_process_target::multifs_readlink (int pid, const char *filename,
6282 char *buf, size_t bufsiz)
6283 {
6284 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6285 }
6286
6287 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6288 struct target_loadseg
6289 {
6290 /* Core address to which the segment is mapped. */
6291 Elf32_Addr addr;
6292 /* VMA recorded in the program header. */
6293 Elf32_Addr p_vaddr;
6294 /* Size of this segment in memory. */
6295 Elf32_Word p_memsz;
6296 };
6297
6298 # if defined PT_GETDSBT
6299 struct target_loadmap
6300 {
6301 /* Protocol version number, must be zero. */
6302 Elf32_Word version;
6303 /* Pointer to the DSBT table, its size, and the DSBT index. */
6304 unsigned *dsbt_table;
6305 unsigned dsbt_size, dsbt_index;
6306 /* Number of segments in this map. */
6307 Elf32_Word nsegs;
6308 /* The actual memory map. */
6309 struct target_loadseg segs[/*nsegs*/];
6310 };
6311 # define LINUX_LOADMAP PT_GETDSBT
6312 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6313 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6314 # else
6315 struct target_loadmap
6316 {
6317 /* Protocol version number, must be zero. */
6318 Elf32_Half version;
6319 /* Number of segments in this map. */
6320 Elf32_Half nsegs;
6321 /* The actual memory map. */
6322 struct target_loadseg segs[/*nsegs*/];
6323 };
6324 # define LINUX_LOADMAP PTRACE_GETFDPIC
6325 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6326 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6327 # endif
6328
6329 bool
6330 linux_process_target::supports_read_loadmap ()
6331 {
6332 return true;
6333 }
6334
6335 int
6336 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6337 unsigned char *myaddr, unsigned int len)
6338 {
6339 int pid = lwpid_of (current_thread);
6340 int addr = -1;
6341 struct target_loadmap *data = NULL;
6342 unsigned int actual_length, copy_length;
6343
6344 if (strcmp (annex, "exec") == 0)
6345 addr = (int) LINUX_LOADMAP_EXEC;
6346 else if (strcmp (annex, "interp") == 0)
6347 addr = (int) LINUX_LOADMAP_INTERP;
6348 else
6349 return -1;
6350
6351 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6352 return -1;
6353
6354 if (data == NULL)
6355 return -1;
6356
6357 actual_length = sizeof (struct target_loadmap)
6358 + sizeof (struct target_loadseg) * data->nsegs;
6359
6360 if (offset < 0 || offset > actual_length)
6361 return -1;
6362
6363 copy_length = actual_length - offset < len ? actual_length - offset : len;
6364 memcpy (myaddr, (char *) data + offset, copy_length);
6365 return copy_length;
6366 }
6367 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6368
6369 bool
6370 linux_process_target::supports_catch_syscall ()
6371 {
6372 return (low_supports_catch_syscall ()
6373 && linux_supports_tracesysgood ());
6374 }
6375
6376 bool
6377 linux_process_target::low_supports_catch_syscall ()
6378 {
6379 return false;
6380 }
6381
6382 CORE_ADDR
6383 linux_process_target::read_pc (regcache *regcache)
6384 {
6385 if (!low_supports_breakpoints ())
6386 return 0;
6387
6388 return low_get_pc (regcache);
6389 }
6390
6391 void
6392 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6393 {
6394 gdb_assert (low_supports_breakpoints ());
6395
6396 low_set_pc (regcache, pc);
6397 }
6398
6399 bool
6400 linux_process_target::supports_thread_stopped ()
6401 {
6402 return true;
6403 }
6404
6405 bool
6406 linux_process_target::thread_stopped (thread_info *thread)
6407 {
6408 return get_thread_lwp (thread)->stopped;
6409 }
6410
6411 /* This exposes stop-all-threads functionality to other modules. */
6412
6413 void
6414 linux_process_target::pause_all (bool freeze)
6415 {
6416 stop_all_lwps (freeze, NULL);
6417 }
6418
6419 /* This exposes unstop-all-threads functionality to other gdbserver
6420 modules. */
6421
6422 void
6423 linux_process_target::unpause_all (bool unfreeze)
6424 {
6425 unstop_all_lwps (unfreeze, NULL);
6426 }
6427
6428 int
6429 linux_process_target::prepare_to_access_memory ()
6430 {
6431 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6432 running LWP. */
6433 if (non_stop)
6434 target_pause_all (true);
6435 return 0;
6436 }
6437
6438 void
6439 linux_process_target::done_accessing_memory ()
6440 {
6441 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6442 running LWP. */
6443 if (non_stop)
6444 target_unpause_all (true);
6445 }
6446
6447 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6448
6449 static int
6450 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6451 CORE_ADDR *phdr_memaddr, int *num_phdr)
6452 {
6453 char filename[PATH_MAX];
6454 int fd;
6455 const int auxv_size = is_elf64
6456 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6457 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6458
6459 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6460
6461 fd = open (filename, O_RDONLY);
6462 if (fd < 0)
6463 return 1;
6464
6465 *phdr_memaddr = 0;
6466 *num_phdr = 0;
6467 while (read (fd, buf, auxv_size) == auxv_size
6468 && (*phdr_memaddr == 0 || *num_phdr == 0))
6469 {
6470 if (is_elf64)
6471 {
6472 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6473
6474 switch (aux->a_type)
6475 {
6476 case AT_PHDR:
6477 *phdr_memaddr = aux->a_un.a_val;
6478 break;
6479 case AT_PHNUM:
6480 *num_phdr = aux->a_un.a_val;
6481 break;
6482 }
6483 }
6484 else
6485 {
6486 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6487
6488 switch (aux->a_type)
6489 {
6490 case AT_PHDR:
6491 *phdr_memaddr = aux->a_un.a_val;
6492 break;
6493 case AT_PHNUM:
6494 *num_phdr = aux->a_un.a_val;
6495 break;
6496 }
6497 }
6498 }
6499
6500 close (fd);
6501
6502 if (*phdr_memaddr == 0 || *num_phdr == 0)
6503 {
6504 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6505 "phdr_memaddr = %ld, phdr_num = %d",
6506 (long) *phdr_memaddr, *num_phdr);
6507 return 2;
6508 }
6509
6510 return 0;
6511 }
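/* Editorial note: /proc/PID/auxv is a flat array of (a_type, a_un)
   pairs terminated by an AT_NULL entry, so each iteration above reads
   exactly one entry.  For a 64-bit inferior an entry is 16 bytes, for
   example (values invented for illustration):

     a_type = AT_PHDR  (3)    a_val = 0x555555554040
     a_type = AT_PHNUM (5)    a_val = 13
     ...
     a_type = AT_NULL  (0)    a_val = 0
*/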
6512
6513 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6514
6515 static CORE_ADDR
6516 get_dynamic (const int pid, const int is_elf64)
6517 {
6518 CORE_ADDR phdr_memaddr, relocation;
6519 int num_phdr, i;
6520 unsigned char *phdr_buf;
6521 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6522
6523 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6524 return 0;
6525
6526 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6527 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6528
6529 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6530 return 0;
6531
6532 /* Compute relocation: it is expected to be 0 for "regular" executables,
6533 non-zero for PIE ones. */
6534 relocation = -1;
6535 for (i = 0; relocation == -1 && i < num_phdr; i++)
6536 if (is_elf64)
6537 {
6538 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6539
6540 if (p->p_type == PT_PHDR)
6541 relocation = phdr_memaddr - p->p_vaddr;
6542 }
6543 else
6544 {
6545 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6546
6547 if (p->p_type == PT_PHDR)
6548 relocation = phdr_memaddr - p->p_vaddr;
6549 }
6550
6551 if (relocation == -1)
6552 {
6553 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6554 real-world executables, including PIE executables, always have
6555 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6556 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6557 provides DT_DEBUG anyway (fpc binaries are statically linked).
6558
6559 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6560
6561 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6562
6563 return 0;
6564 }
6565
6566 for (i = 0; i < num_phdr; i++)
6567 {
6568 if (is_elf64)
6569 {
6570 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6571
6572 if (p->p_type == PT_DYNAMIC)
6573 return p->p_vaddr + relocation;
6574 }
6575 else
6576 {
6577 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6578
6579 if (p->p_type == PT_DYNAMIC)
6580 return p->p_vaddr + relocation;
6581 }
6582 }
6583
6584 return 0;
6585 }
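/* Editorial example (addresses invented for illustration): suppose the
   program headers of a PIE executable record PT_PHDR at p_vaddr 0x40,
   while AT_PHDR reports them mapped at 0x555555554040.  Then

     relocation = 0x555555554040 - 0x40 = 0x555555554000

   and a PT_DYNAMIC with p_vaddr 0x2d80 yields &_DYNAMIC at
   0x555555556d80.  */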
6586
6587 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6588 can be 0 if the inferior does not yet have the library list initialized.
6589 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6590 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6591
6592 static CORE_ADDR
6593 get_r_debug (const int pid, const int is_elf64)
6594 {
6595 CORE_ADDR dynamic_memaddr;
6596 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6597 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6598 CORE_ADDR map = -1;
6599
6600 dynamic_memaddr = get_dynamic (pid, is_elf64);
6601 if (dynamic_memaddr == 0)
6602 return map;
6603
6604 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6605 {
6606 if (is_elf64)
6607 {
6608 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6609 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6610 union
6611 {
6612 Elf64_Xword map;
6613 unsigned char buf[sizeof (Elf64_Xword)];
6614 }
6615 rld_map;
6616 #endif
6617 #ifdef DT_MIPS_RLD_MAP
6618 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6619 {
6620 if (linux_read_memory (dyn->d_un.d_val,
6621 rld_map.buf, sizeof (rld_map.buf)) == 0)
6622 return rld_map.map;
6623 else
6624 break;
6625 }
6626 #endif /* DT_MIPS_RLD_MAP */
6627 #ifdef DT_MIPS_RLD_MAP_REL
6628 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6629 {
6630 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6631 rld_map.buf, sizeof (rld_map.buf)) == 0)
6632 return rld_map.map;
6633 else
6634 break;
6635 }
6636 #endif /* DT_MIPS_RLD_MAP_REL */
6637
6638 if (dyn->d_tag == DT_DEBUG && map == -1)
6639 map = dyn->d_un.d_val;
6640
6641 if (dyn->d_tag == DT_NULL)
6642 break;
6643 }
6644 else
6645 {
6646 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6647 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6648 union
6649 {
6650 Elf32_Word map;
6651 unsigned char buf[sizeof (Elf32_Word)];
6652 }
6653 rld_map;
6654 #endif
6655 #ifdef DT_MIPS_RLD_MAP
6656 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6657 {
6658 if (linux_read_memory (dyn->d_un.d_val,
6659 rld_map.buf, sizeof (rld_map.buf)) == 0)
6660 return rld_map.map;
6661 else
6662 break;
6663 }
6664 #endif /* DT_MIPS_RLD_MAP */
6665 #ifdef DT_MIPS_RLD_MAP_REL
6666 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6667 {
6668 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6669 rld_map.buf, sizeof (rld_map.buf)) == 0)
6670 return rld_map.map;
6671 else
6672 break;
6673 }
6674 #endif /* DT_MIPS_RLD_MAP_REL */
6675
6676 if (dyn->d_tag == DT_DEBUG && map == -1)
6677 map = dyn->d_un.d_val;
6678
6679 if (dyn->d_tag == DT_NULL)
6680 break;
6681 }
6682
6683 dynamic_memaddr += dyn_size;
6684 }
6685
6686 return map;
6687 }
6688
6689 /* Read one pointer from MEMADDR in the inferior. */
6690
6691 static int
6692 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6693 {
6694 int ret;
6695
6696 /* Go through a union so this works on either big or little endian
6697 hosts, when the inferior's pointer size is smaller than the size
6698 of CORE_ADDR. It is assumed the inferior's endianness is the
6699 same as the superior's. */
6700 union
6701 {
6702 CORE_ADDR core_addr;
6703 unsigned int ui;
6704 unsigned char uc;
6705 } addr;
6706
6707 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6708 if (ret == 0)
6709 {
6710 if (ptr_size == sizeof (CORE_ADDR))
6711 *ptr = addr.core_addr;
6712 else if (ptr_size == sizeof (unsigned int))
6713 *ptr = addr.ui;
6714 else
6715 gdb_assert_not_reached ("unhandled pointer size");
6716 }
6717 return ret;
6718 }
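/* Editorial sketch (illustrative only): typical use of read_one_ptr,
   chasing the l_next field of a 64-bit SVR4 link_map entry the way the
   qxfer_libraries_svr4 code below does.  The offset 24 matches
   lmo_64bit_offsets below; lm_addr is assumed to hold a valid entry.  */

#if 0 /* illustrative only */
  CORE_ADDR l_next;

  if (read_one_ptr (lm_addr + 24 /* l_next_offset */, &l_next, 8) == 0)
    lm_addr = l_next;	/* Advance to the next shared library.  */
#endif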
6719
6720 bool
6721 linux_process_target::supports_qxfer_libraries_svr4 ()
6722 {
6723 return true;
6724 }
6725
6726 struct link_map_offsets
6727 {
6728 /* Offset and size of r_debug.r_version. */
6729 int r_version_offset;
6730
6731 /* Offset and size of r_debug.r_map. */
6732 int r_map_offset;
6733
6734 /* Offset to l_addr field in struct link_map. */
6735 int l_addr_offset;
6736
6737 /* Offset to l_name field in struct link_map. */
6738 int l_name_offset;
6739
6740 /* Offset to l_ld field in struct link_map. */
6741 int l_ld_offset;
6742
6743 /* Offset to l_next field in struct link_map. */
6744 int l_next_offset;
6745
6746 /* Offset to l_prev field in struct link_map. */
6747 int l_prev_offset;
6748 };
6749
6750 /* Construct qXfer:libraries-svr4:read reply. */
6751
6752 int
6753 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6754 unsigned char *readbuf,
6755 unsigned const char *writebuf,
6756 CORE_ADDR offset, int len)
6757 {
6758 struct process_info_private *const priv = current_process ()->priv;
6759 char filename[PATH_MAX];
6760 int pid, is_elf64;
6761
6762 static const struct link_map_offsets lmo_32bit_offsets =
6763 {
6764 0, /* r_version offset. */
6765 4, /* r_debug.r_map offset. */
6766 0, /* l_addr offset in link_map. */
6767 4, /* l_name offset in link_map. */
6768 8, /* l_ld offset in link_map. */
6769 12, /* l_next offset in link_map. */
6770 16 /* l_prev offset in link_map. */
6771 };
6772
6773 static const struct link_map_offsets lmo_64bit_offsets =
6774 {
6775 0, /* r_version offset. */
6776 8, /* r_debug.r_map offset. */
6777 0, /* l_addr offset in link_map. */
6778 8, /* l_name offset in link_map. */
6779 16, /* l_ld offset in link_map. */
6780 24, /* l_next offset in link_map. */
6781 32 /* l_prev offset in link_map. */
6782 };
6783 const struct link_map_offsets *lmo;
6784 unsigned int machine;
6785 int ptr_size;
6786 CORE_ADDR lm_addr = 0, lm_prev = 0;
6787 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6788 int header_done = 0;
6789
6790 if (writebuf != NULL)
6791 return -2;
6792 if (readbuf == NULL)
6793 return -1;
6794
6795 pid = lwpid_of (current_thread);
6796 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6797 is_elf64 = elf_64_file_p (filename, &machine);
6798 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6799 ptr_size = is_elf64 ? 8 : 4;
6800
6801 while (annex[0] != '\0')
6802 {
6803 const char *sep;
6804 CORE_ADDR *addrp;
6805 int name_len;
6806
6807 sep = strchr (annex, '=');
6808 if (sep == NULL)
6809 break;
6810
6811 name_len = sep - annex;
6812 if (name_len == 5 && startswith (annex, "start"))
6813 addrp = &lm_addr;
6814 else if (name_len == 4 && startswith (annex, "prev"))
6815 addrp = &lm_prev;
6816 else
6817 {
6818 annex = strchr (sep, ';');
6819 if (annex == NULL)
6820 break;
6821 annex++;
6822 continue;
6823 }
6824
6825 annex = decode_address_to_semicolon (addrp, sep + 1);
6826 }
6827
6828 if (lm_addr == 0)
6829 {
6830 int r_version = 0;
6831
6832 if (priv->r_debug == 0)
6833 priv->r_debug = get_r_debug (pid, is_elf64);
6834
6835 /* We failed to find DT_DEBUG. That situation will not change
6836 for this inferior, so do not retry it. Report it to GDB as
6837 E01; see GDB's solib-svr4.c for the reasons. */
6838 if (priv->r_debug == (CORE_ADDR) -1)
6839 return -1;
6840
6841 if (priv->r_debug != 0)
6842 {
6843 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6844 (unsigned char *) &r_version,
6845 sizeof (r_version)) != 0
6846 || r_version != 1)
6847 {
6848 warning ("unexpected r_debug version %d", r_version);
6849 }
6850 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6851 &lm_addr, ptr_size) != 0)
6852 {
6853 warning ("unable to read r_map from 0x%lx",
6854 (long) priv->r_debug + lmo->r_map_offset);
6855 }
6856 }
6857 }
6858
6859 std::string document = "<library-list-svr4 version=\"1.0\"";
6860
6861 while (lm_addr
6862 && read_one_ptr (lm_addr + lmo->l_name_offset,
6863 &l_name, ptr_size) == 0
6864 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6865 &l_addr, ptr_size) == 0
6866 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6867 &l_ld, ptr_size) == 0
6868 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6869 &l_prev, ptr_size) == 0
6870 && read_one_ptr (lm_addr + lmo->l_next_offset,
6871 &l_next, ptr_size) == 0)
6872 {
6873 unsigned char libname[PATH_MAX];
6874
6875 if (lm_prev != l_prev)
6876 {
6877 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6878 (long) lm_prev, (long) l_prev);
6879 break;
6880 }
6881
6882 /* Ignore the first entry even if it has a valid name, as the first
6883 entry corresponds to the main executable. The first entry should not
6884 be skipped if the dynamic loader was loaded late by a static executable
6885 (see the solib-svr4.c parameter ignore_first). But in that case the
6886 main executable does not have PT_DYNAMIC present, and this function
6887 has already exited above because get_r_debug failed. */
6888 if (lm_prev == 0)
6889 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6890 else
6891 {
6892 /* Not checking for error because reading may stop before
6893 we've got PATH_MAX worth of characters. */
6894 libname[0] = '\0';
6895 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6896 libname[sizeof (libname) - 1] = '\0';
6897 if (libname[0] != '\0')
6898 {
6899 if (!header_done)
6900 {
6901 /* Terminate `<library-list-svr4'. */
6902 document += '>';
6903 header_done = 1;
6904 }
6905
6906 string_appendf (document, "<library name=\"");
6907 xml_escape_text_append (&document, (char *) libname);
6908 string_appendf (document, "\" lm=\"0x%lx\" "
6909 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6910 (unsigned long) lm_addr, (unsigned long) l_addr,
6911 (unsigned long) l_ld);
6912 }
6913 }
6914
6915 lm_prev = lm_addr;
6916 lm_addr = l_next;
6917 }
6918
6919 if (!header_done)
6920 {
6921 /* Empty list; terminate `<library-list-svr4'. */
6922 document += "/>";
6923 }
6924 else
6925 document += "</library-list-svr4>";
6926
6927 int document_len = document.length ();
6928 if (offset < document_len)
6929 document_len -= offset;
6930 else
6931 document_len = 0;
6932 if (len > document_len)
6933 len = document_len;
6934
6935 memcpy (readbuf, document.data () + offset, len);
6936
6937 return len;
6938 }
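/* Editorial example of a qXfer:libraries-svr4:read reply built by the
   function above (addresses invented for illustration):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
     l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdba0"/>
   </library-list-svr4>
*/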
6939
6940 #ifdef HAVE_LINUX_BTRACE
6941
6942 btrace_target_info *
6943 linux_process_target::enable_btrace (ptid_t ptid,
6944 const btrace_config *conf)
6945 {
6946 return linux_enable_btrace (ptid, conf);
6947 }
6948
6949 /* See to_disable_btrace target method. */
6950
6951 int
6952 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6953 {
6954 enum btrace_error err;
6955
6956 err = linux_disable_btrace (tinfo);
6957 return (err == BTRACE_ERR_NONE ? 0 : -1);
6958 }
6959
6960 /* Encode an Intel Processor Trace configuration. */
6961
6962 static void
6963 linux_low_encode_pt_config (struct buffer *buffer,
6964 const struct btrace_data_pt_config *config)
6965 {
6966 buffer_grow_str (buffer, "<pt-config>\n");
6967
6968 switch (config->cpu.vendor)
6969 {
6970 case CV_INTEL:
6971 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6972 "model=\"%u\" stepping=\"%u\"/>\n",
6973 config->cpu.family, config->cpu.model,
6974 config->cpu.stepping);
6975 break;
6976
6977 default:
6978 break;
6979 }
6980
6981 buffer_grow_str (buffer, "</pt-config>\n");
6982 }
6983
6984 /* Encode a raw buffer. */
6985
6986 static void
6987 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6988 unsigned int size)
6989 {
6990 if (size == 0)
6991 return;
6992
6993 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6994 buffer_grow_str (buffer, "<raw>\n");
6995
6996 while (size-- > 0)
6997 {
6998 char elem[2];
6999
7000 elem[0] = tohex ((*data >> 4) & 0xf);
7001 elem[1] = tohex (*data++ & 0xf);
7002
7003 buffer_grow (buffer, elem, 2);
7004 }
7005
7006 buffer_grow_str (buffer, "</raw>\n");
7007 }
7008
7009 /* See to_read_btrace target method. */
7010
7011 int
7012 linux_process_target::read_btrace (btrace_target_info *tinfo,
7013 buffer *buffer,
7014 enum btrace_read_type type)
7015 {
7016 struct btrace_data btrace;
7017 enum btrace_error err;
7018
7019 err = linux_read_btrace (&btrace, tinfo, type);
7020 if (err != BTRACE_ERR_NONE)
7021 {
7022 if (err == BTRACE_ERR_OVERFLOW)
7023 buffer_grow_str0 (buffer, "E.Overflow.");
7024 else
7025 buffer_grow_str0 (buffer, "E.Generic Error.");
7026
7027 return -1;
7028 }
7029
7030 switch (btrace.format)
7031 {
7032 case BTRACE_FORMAT_NONE:
7033 buffer_grow_str0 (buffer, "E.No Trace.");
7034 return -1;
7035
7036 case BTRACE_FORMAT_BTS:
7037 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7038 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7039
7040 for (const btrace_block &block : *btrace.variant.bts.blocks)
7041 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7042 paddress (block.begin), paddress (block.end));
7043
7044 buffer_grow_str0 (buffer, "</btrace>\n");
7045 break;
7046
7047 case BTRACE_FORMAT_PT:
7048 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7049 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7050 buffer_grow_str (buffer, "<pt>\n");
7051
7052 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7053
7054 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7055 btrace.variant.pt.size);
7056
7057 buffer_grow_str (buffer, "</pt>\n");
7058 buffer_grow_str0 (buffer, "</btrace>\n");
7059 break;
7060
7061 default:
7062 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7063 return -1;
7064 }
7065
7066 return 0;
7067 }
7068
7069 /* See to_btrace_conf target method. */
7070
7071 int
7072 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7073 buffer *buffer)
7074 {
7075 const struct btrace_config *conf;
7076
7077 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7078 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7079
7080 conf = linux_btrace_conf (tinfo);
7081 if (conf != NULL)
7082 {
7083 switch (conf->format)
7084 {
7085 case BTRACE_FORMAT_NONE:
7086 break;
7087
7088 case BTRACE_FORMAT_BTS:
7089 buffer_xml_printf (buffer, "<bts");
7090 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7091 buffer_xml_printf (buffer, " />\n");
7092 break;
7093
7094 case BTRACE_FORMAT_PT:
7095 buffer_xml_printf (buffer, "<pt");
7096 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7097 buffer_xml_printf (buffer, "/>\n");
7098 break;
7099 }
7100 }
7101
7102 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7103 return 0;
7104 }
7105 #endif /* HAVE_LINUX_BTRACE */
7106
7107 /* See nat/linux-nat.h. */
7108
7109 ptid_t
7110 current_lwp_ptid (void)
7111 {
7112 return ptid_of (current_thread);
7113 }
7114
7115 const char *
7116 linux_process_target::thread_name (ptid_t thread)
7117 {
7118 return linux_proc_tid_get_name (thread);
7119 }
7120
7121 #if USE_THREAD_DB
7122 bool
7123 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7124 int *handle_len)
7125 {
7126 return thread_db_thread_handle (ptid, handle, handle_len);
7127 }
7128 #endif
7129
7130 thread_info *
7131 linux_process_target::thread_pending_parent (thread_info *thread)
7132 {
7133 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7134
7135 if (parent == nullptr)
7136 return nullptr;
7137
7138 return get_lwp_thread (parent);
7139 }
7140
7141 /* Default implementation of linux_target_ops method "set_pc" for
7142 32-bit pc register which is literally named "pc". */
7143
7144 void
7145 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7146 {
7147 uint32_t newpc = pc;
7148
7149 supply_register_by_name (regcache, "pc", &newpc);
7150 }
7151
7152 /* Default implementation of linux_target_ops method "get_pc" for
7153 32-bit pc register which is literally named "pc". */
7154
7155 CORE_ADDR
7156 linux_get_pc_32bit (struct regcache *regcache)
7157 {
7158 uint32_t pc;
7159
7160 collect_register_by_name (regcache, "pc", &pc);
7161 if (debug_threads)
7162 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7163 return pc;
7164 }
7165
7166 /* Default implementation of linux_target_ops method "set_pc" for
7167 64-bit pc register which is literally named "pc". */
7168
7169 void
7170 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7171 {
7172 uint64_t newpc = pc;
7173
7174 supply_register_by_name (regcache, "pc", &newpc);
7175 }
7176
7177 /* Default implementation of linux_target_ops method "get_pc" for
7178 64-bit pc register which is literally named "pc". */
7179
7180 CORE_ADDR
7181 linux_get_pc_64bit (struct regcache *regcache)
7182 {
7183 uint64_t pc;
7184
7185 collect_register_by_name (regcache, "pc", &pc);
7186 if (debug_threads)
7187 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7188 return pc;
7189 }
7190
7191 /* See linux-low.h. */
7192
7193 int
7194 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7195 {
7196 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7197 int offset = 0;
7198
7199 gdb_assert (wordsize == 4 || wordsize == 8);
7200
7201 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7202 {
7203 if (wordsize == 4)
7204 {
7205 uint32_t *data_p = (uint32_t *) data;
7206 if (data_p[0] == match)
7207 {
7208 *valp = data_p[1];
7209 return 1;
7210 }
7211 }
7212 else
7213 {
7214 uint64_t *data_p = (uint64_t *) data;
7215 if (data_p[0] == match)
7216 {
7217 *valp = data_p[1];
7218 return 1;
7219 }
7220 }
7221
7222 offset += 2 * wordsize;
7223 }
7224
7225 return 0;
7226 }
7227
7228 /* See linux-low.h. */
7229
7230 CORE_ADDR
7231 linux_get_hwcap (int wordsize)
7232 {
7233 CORE_ADDR hwcap = 0;
7234 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7235 return hwcap;
7236 }
7237
7238 /* See linux-low.h. */
7239
7240 CORE_ADDR
7241 linux_get_hwcap2 (int wordsize)
7242 {
7243 CORE_ADDR hwcap2 = 0;
7244 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7245 return hwcap2;
7246 }
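/* Editorial sketch (illustrative only): how an architecture backend
   might consume the helpers above.  The feature bit is made up; real
   backends test the HWCAP_* constants from their kernel headers.  */

#if 0 /* illustrative only */
  /* Word size follows the inferior: 8 for a 64-bit inferior, 4 for a
     32-bit one.  */
  CORE_ADDR hwcap = linux_get_hwcap (8);
  CORE_ADDR hwcap2 = linux_get_hwcap2 (8);

#define EXAMPLE_HWCAP_FEATURE (1 << 5)	/* Made-up bit for illustration.  */
  bool have_feature = (hwcap & EXAMPLE_HWCAP_FEATURE) != 0;
#endif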
7247
7248 #ifdef HAVE_LINUX_REGSETS
7249 void
7250 initialize_regsets_info (struct regsets_info *info)
7251 {
7252 for (info->num_regsets = 0;
7253 info->regsets[info->num_regsets].size >= 0;
7254 info->num_regsets++)
7255 ;
7256 }
7257 #endif
7258
7259 void
7260 initialize_low (void)
7261 {
7262 struct sigaction sigchld_action;
7263
7264 memset (&sigchld_action, 0, sizeof (sigchld_action));
7265 set_target_ops (the_linux_target);
7266
7267 linux_ptrace_init_warnings ();
7268 linux_proc_init_warnings ();
7269
7270 sigchld_action.sa_handler = sigchld_handler;
7271 sigemptyset (&sigchld_action.sa_mask);
7272 sigchld_action.sa_flags = SA_RESTART;
7273 sigaction (SIGCHLD, &sigchld_action, NULL);
7274
7275 initialize_low_arch ();
7276
7277 linux_check_ptrace_features ();
7278 }